#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
import six
from heat.common import exception
from heat.common import identifier
from heat.engine import parameters
from heat.engine import template
from heat.tests import common
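# Test helper: wrap a single parameter schema in a minimal CFN-style template,
# build the corresponding parameters.Parameter and (optionally) validate the
# supplied value before returning it.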
def new_parameter(name, schema, value=None, validate_value=True):
tmpl = template.Template({'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {name: schema}})
schema = tmpl.param_schemata()[name]
param = parameters.Parameter(name, schema, value)
param.validate(validate_value)
return param
class ParameterTestCommon(common.HeatTestCase):
scenarios = [
('type_string', dict(p_type='String',
inst=parameters.StringParam,
value='test',
expected='test',
allowed_value=['foo'],
zero='',
default='default')),
('type_number', dict(p_type='Number',
inst=parameters.NumberParam,
value=10,
expected='10',
allowed_value=[42],
zero=0,
default=13)),
('type_list', dict(p_type='CommaDelimitedList',
inst=parameters.CommaDelimitedListParam,
value=['a', 'b', 'c'],
expected='a,b,c',
allowed_value=['foo'],
zero=[],
default=['d', 'e', 'f'])),
('type_json', dict(p_type='Json',
inst=parameters.JsonParam,
value={'a': '1'},
expected='{"a": "1"}',
allowed_value=[{'foo': 'bar'}],
zero={},
default={'d': '1'})),
('type_int_json', dict(p_type='Json',
inst=parameters.JsonParam,
value={'a': 1},
expected='{"a": 1}',
allowed_value=[{'foo': 'bar'}],
zero={},
default={'d': 1})),
('type_boolean', dict(p_type='Boolean',
inst=parameters.BooleanParam,
value=True,
expected='True',
allowed_value=[False],
zero=False,
default=True))
]
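# The scenarios above parametrize every test method in this class once per
# parameter type (testscenarios-style), supplying the expected string form, an
# allowed value, an "empty" value and a default for that type.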
def test_new_param(self):
p = new_parameter('p', {'Type': self.p_type}, validate_value=False)
self.assertIsInstance(p, self.inst)
def test_param_to_str(self):
p = new_parameter('p', {'Type': self.p_type}, self.value)
if self.p_type == 'Json':
self.assertEqual(json.loads(self.expected), json.loads(str(p)))
else:
self.assertEqual(self.expected, str(p))
def test_default_no_override(self):
p = new_parameter('defaulted', {'Type': self.p_type,
'Default': self.default})
self.assertTrue(p.has_default())
self.assertEqual(self.default, p.default())
self.assertEqual(self.default, p.value())
def test_default_override(self):
p = new_parameter('defaulted', {'Type': self.p_type,
'Default': self.default},
self.value)
self.assertTrue(p.has_default())
self.assertEqual(self.default, p.default())
self.assertEqual(self.value, p.value())
def test_default_invalid(self):
schema = {'Type': self.p_type,
'AllowedValues': self.allowed_value,
'ConstraintDescription': 'wibble',
'Default': self.default}
if self.p_type == 'Json':
err = self.assertRaises(exception.InvalidSchemaError,
new_parameter, 'p', schema)
self.assertIn('AllowedValues constraint invalid for Json',
six.text_type(err))
else:
err = self.assertRaises(exception.InvalidSchemaError,
new_parameter, 'p', schema)
self.assertIn('wibble', six.text_type(err))
def test_description(self):
description = 'Description of the parameter'
p = new_parameter('p', {'Type': self.p_type,
'Description': description},
validate_value=False)
self.assertEqual(description, p.description())
def test_no_description(self):
p = new_parameter('p', {'Type': self.p_type}, validate_value=False)
self.assertEqual('', p.description())
def test_no_echo_true(self):
p = new_parameter('anechoic', {'Type': self.p_type,
'NoEcho': 'true'},
self.value)
self.assertTrue(p.hidden())
self.assertEqual('******', str(p))
def test_no_echo_true_caps(self):
p = new_parameter('anechoic', {'Type': self.p_type,
'NoEcho': 'TrUe'},
self.value)
self.assertTrue(p.hidden())
self.assertEqual('******', str(p))
def test_no_echo_false(self):
p = new_parameter('echoic', {'Type': self.p_type,
'NoEcho': 'false'},
self.value)
self.assertFalse(p.hidden())
if self.p_type == 'Json':
self.assertEqual(json.loads(self.expected), json.loads(str(p)))
else:
self.assertEqual(self.expected, str(p))
def test_default_empty(self):
p = new_parameter('defaulted', {'Type': self.p_type,
'Default': self.zero})
self.assertTrue(p.has_default())
self.assertEqual(self.zero, p.default())
self.assertEqual(self.zero, p.value())
def test_default_no_empty_user_value_empty(self):
p = new_parameter('defaulted', {'Type': self.p_type,
'Default': self.default},
self.zero)
self.assertTrue(p.has_default())
self.assertEqual(self.default, p.default())
self.assertEqual(self.zero, p.value())
class ParameterTestSpecific(common.HeatTestCase):
def test_new_bad_type(self):
self.assertRaises(exception.InvalidSchemaError, new_parameter,
'p', {'Type': 'List'}, validate_value=False)
def test_string_len_good(self):
schema = {'Type': 'String',
'MinLength': '3',
'MaxLength': '3'}
p = new_parameter('p', schema, 'foo')
self.assertEqual('foo', p.value())
def test_string_underflow(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'MinLength': '4'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'foo')
self.assertIn('wibble', six.text_type(err))
def test_string_overflow(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'MaxLength': '2'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'foo')
self.assertIn('wibble', six.text_type(err))
def test_string_pattern_good(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = new_parameter('p', schema, 'foo')
self.assertEqual('foo', p.value())
def test_string_pattern_bad_prefix(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '1foo')
self.assertIn('wibble', six.text_type(err))
def test_string_pattern_bad_suffix(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'foo1')
self.assertIn('wibble', six.text_type(err))
def test_string_value_list_good(self):
schema = {'Type': 'String',
'AllowedValues': ['foo', 'bar', 'baz']}
p = new_parameter('p', schema, 'bar')
self.assertEqual('bar', p.value())
def test_string_value_unicode(self):
schema = {'Type': 'String'}
p = new_parameter('p', schema, u'test\u2665')
self.assertEqual(u'test\u2665', p.value())
def test_string_value_list_bad(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'AllowedValues': ['foo', 'bar', 'baz']}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'blarg')
self.assertIn('wibble', six.text_type(err))
def test_number_int_good(self):
schema = {'Type': 'Number',
'MinValue': '3',
'MaxValue': '3'}
p = new_parameter('p', schema, '3')
self.assertEqual(3, p.value())
def test_number_float_good_string(self):
schema = {'Type': 'Number',
'MinValue': '3.0',
'MaxValue': '4.0'}
p = new_parameter('p', schema, '3.5')
self.assertEqual(3.5, p.value())
def test_number_float_good_number(self):
schema = {'Type': 'Number',
'MinValue': '3.0',
'MaxValue': '4.0'}
p = new_parameter('p', schema, 3.5)
self.assertEqual(3.5, p.value())
def test_number_low(self):
schema = {'Type': 'Number',
'ConstraintDescription': 'wibble',
'MinValue': '4'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '3')
self.assertIn('wibble', six.text_type(err))
def test_number_high(self):
schema = {'Type': 'Number',
'ConstraintDescription': 'wibble',
'MaxValue': '2'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '3')
self.assertIn('wibble', six.text_type(err))
def test_number_bad(self):
schema = {'Type': 'Number'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'str')
self.assertIn('float', six.text_type(err))
def test_number_value_list_good(self):
schema = {'Type': 'Number',
'AllowedValues': ['1', '3', '5']}
p = new_parameter('p', schema, '5')
self.assertEqual(5, p.value())
def test_number_value_list_bad(self):
schema = {'Type': 'Number',
'ConstraintDescription': 'wibble',
'AllowedValues': ['1', '3', '5']}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '2')
self.assertIn('wibble', six.text_type(err))
def test_list_value_list_default_empty(self):
schema = {'Type': 'CommaDelimitedList', 'Default': ''}
p = new_parameter('p', schema)
self.assertEqual([], p.value())
def test_list_value_list_good(self):
schema = {'Type': 'CommaDelimitedList',
'AllowedValues': ['foo', 'bar', 'baz']}
p = new_parameter('p', schema, 'baz,foo,bar')
self.assertEqual('baz,foo,bar'.split(','), p.value())
schema['Default'] = []
p = new_parameter('p', schema)
self.assertEqual([], p.value())
schema['Default'] = 'baz,foo,bar'
p = new_parameter('p', schema)
self.assertEqual('baz,foo,bar'.split(','), p.value())
schema['AllowedValues'] = ['1', '3', '5']
schema['Default'] = []
p = new_parameter('p', schema, [1, 3, 5])
self.assertEqual('1,3,5', str(p))
schema['Default'] = [1, 3, 5]
p = new_parameter('p', schema)
self.assertEqual('1,3,5'.split(','), p.value())
def test_list_value_list_bad(self):
schema = {'Type': 'CommaDelimitedList',
'ConstraintDescription': 'wibble',
'AllowedValues': ['foo', 'bar', 'baz']}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema,
'foo,baz,blarg')
self.assertIn('wibble', six.text_type(err))
def test_list_validate_good(self):
schema = {'Type': 'CommaDelimitedList'}
val = ['foo', 'bar', 'baz']
val_s = 'foo,bar,baz'
p = new_parameter('p', schema, val_s, validate_value=False)
p.validate()
self.assertEqual(val, p.value())
self.assertEqual(val, p.parsed)
def test_list_validate_bad(self):
schema = {'Type': 'CommaDelimitedList'}
# just need something here that is going to throw an AttributeError
# when .split() is called
val_s = 0
p = new_parameter('p', schema, validate_value=False)
p.user_value = val_s
err = self.assertRaises(exception.StackValidationFailed,
p.validate)
self.assertIn('Parameter \'p\' is invalid', six.text_type(err))
def test_map_value(self):
'''Happy path for value that's already a map.'''
schema = {'Type': 'Json'}
val = {"foo": "bar", "items": [1, 2, 3]}
p = new_parameter('p', schema, val)
self.assertEqual(val, p.value())
self.assertEqual(val, p.parsed)
def test_map_value_bad(self):
'''Map value is not JSON parsable.'''
schema = {'Type': 'Json',
'ConstraintDescription': 'wibble'}
val = {"foo": "bar", "not_json": len}
err = self.assertRaises(ValueError,
new_parameter, 'p', schema, val)
self.assertIn('Value must be valid JSON', six.text_type(err))
def test_map_value_parse(self):
'''Happy path for value that's a string.'''
schema = {'Type': 'Json'}
val = {"foo": "bar", "items": [1, 2, 3]}
val_s = json.dumps(val)
p = new_parameter('p', schema, val_s)
self.assertEqual(val, p.value())
self.assertEqual(val, p.parsed)
def test_map_value_bad_parse(self):
'''Test value error for unparsable string value.'''
schema = {'Type': 'Json',
'ConstraintDescription': 'wibble'}
val = "I am not a map"
err = self.assertRaises(ValueError,
new_parameter, 'p', schema, val)
self.assertIn('Value must be valid JSON', six.text_type(err))
def test_map_underrun(self):
'''Test map length under MIN_LEN.'''
schema = {'Type': 'Json',
'MinLength': 3}
val = {"foo": "bar", "items": [1, 2, 3]}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, val)
self.assertIn('out of range', six.text_type(err))
def test_map_overrun(self):
'''Test map length over MAX_LEN.'''
schema = {'Type': 'Json',
'MaxLength': 1}
val = {"foo": "bar", "items": [1, 2, 3]}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, val)
self.assertIn('out of range', six.text_type(err))
def test_json_list(self):
schema = {'Type': 'Json'}
val = ["fizz", "buzz"]
p = new_parameter('p', schema, val)
self.assertIsInstance(p.value(), list)
self.assertIn("fizz", p.value())
self.assertIn("buzz", p.value())
def test_json_string_list(self):
schema = {'Type': 'Json'}
val = '["fizz", "buzz"]'
p = new_parameter('p', schema, val)
self.assertIsInstance(p.value(), list)
self.assertIn("fizz", p.value())
self.assertIn("buzz", p.value())
def test_json_validate_good(self):
schema = {'Type': 'Json'}
val = {"foo": "bar", "items": [1, 2, 3]}
val_s = json.dumps(val)
p = new_parameter('p', schema, val_s, validate_value=False)
p.validate()
self.assertEqual(val, p.value())
self.assertEqual(val, p.parsed)
def test_json_validate_bad(self):
schema = {'Type': 'Json'}
val_s = '{"foo": "bar", "invalid": ]}'
p = new_parameter('p', schema, validate_value=False)
p.user_value = val_s
err = self.assertRaises(exception.StackValidationFailed,
p.validate)
self.assertIn('Parameter \'p\' is invalid', six.text_type(err))
def test_bool_value_true(self):
schema = {'Type': 'Boolean'}
for val in ('1', 't', 'true', 'on', 'y', 'yes', True, 1):
bo = new_parameter('bo', schema, val)
self.assertTrue(bo.value())
def test_bool_value_false(self):
schema = {'Type': 'Boolean'}
for val in ('0', 'f', 'false', 'off', 'n', 'no', False, 0):
bo = new_parameter('bo', schema, val)
self.assertFalse(bo.value())
def test_bool_value_invalid(self):
schema = {'Type': 'Boolean'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'bo', schema, 'foo')
self.assertIn("Unrecognized value 'foo'", six.text_type(err))
def test_missing_param_str(self):
'''Test missing user parameter.'''
self.assertRaises(exception.UserParameterMissing,
new_parameter, 'p',
{'Type': 'String'})
def test_missing_param_list(self):
'''Test missing user parameter.'''
self.assertRaises(exception.UserParameterMissing,
new_parameter, 'p',
{'Type': 'CommaDelimitedList'})
def test_missing_param_map(self):
'''Test missing user parameter.'''
self.assertRaises(exception.UserParameterMissing,
new_parameter, 'p',
{'Type': 'Json'})
def test_param_name_in_error_message(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'testparam', schema, '234')
expected = ("Parameter 'testparam' is invalid: "
'"234" does not match pattern "[a-z]*"')
self.assertEqual(expected, six.text_type(err))
params_schema = json.loads('''{
"Parameters" : {
"User" : { "Type": "String" },
"Defaulted" : {
"Type": "String",
"Default": "foobar"
}
}
}''')
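# The helper below wraps template.Template: it injects the template version,
# binds the template to a HeatIdentifier for the named stack and returns the
# resulting Parameters collection (pseudo parameters such as AWS::StackName
# included), validating user values unless told otherwise.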
class ParametersBase(common.HeatTestCase):
def new_parameters(self, stack_name, tmpl, user_params=None,
stack_id=None, validate_value=True,
param_defaults=None):
user_params = user_params or {}
tmpl.update({'HeatTemplateFormatVersion': '2012-12-12'})
tmpl = template.Template(tmpl)
params = tmpl.parameters(
identifier.HeatIdentifier('', stack_name, stack_id),
user_params, param_defaults=param_defaults)
params.validate(validate_value)
return params
class ParametersTest(ParametersBase):
def test_pseudo_params(self):
stack_name = 'test_stack'
params = self.new_parameters(stack_name, {"Parameters": {}})
self.assertEqual('test_stack', params['AWS::StackName'])
self.assertEqual(
'arn:openstack:heat:::stacks/{0}/{1}'.format(stack_name, 'None'),
params['AWS::StackId'])
self.assertIn('AWS::Region', params)
def test_pseudo_param_stackid(self):
stack_name = 'test_stack'
params = self.new_parameters(stack_name, {'Parameters': {}},
stack_id='abc123')
self.assertEqual(
'arn:openstack:heat:::stacks/{0}/{1}'.format(stack_name, 'abc123'),
params['AWS::StackId'])
stack_identifier = identifier.HeatIdentifier('', '', 'def456')
params.set_stack_id(stack_identifier)
self.assertEqual(stack_identifier.arn(), params['AWS::StackId'])
def test_schema_invariance(self):
params1 = self.new_parameters('test', params_schema,
{'User': 'foo',
'Defaulted': 'wibble'})
self.assertEqual('wibble', params1['Defaulted'])
params2 = self.new_parameters('test', params_schema, {'User': 'foo'})
self.assertEqual('foobar', params2['Defaulted'])
def test_to_dict(self):
template = {'Parameters': {'Foo': {'Type': 'String'},
'Bar': {'Type': 'Number', 'Default': '42'}}}
params = self.new_parameters('test_params', template, {'Foo': 'foo'})
as_dict = dict(params)
self.assertEqual('foo', as_dict['Foo'])
self.assertEqual(42, as_dict['Bar'])
self.assertEqual('test_params', as_dict['AWS::StackName'])
self.assertIn('AWS::Region', as_dict)
def test_map(self):
template = {'Parameters': {'Foo': {'Type': 'String'},
'Bar': {'Type': 'Number', 'Default': '42'}}}
params = self.new_parameters('test_params', template, {'Foo': 'foo'})
expected = {'Foo': False,
'Bar': True,
'AWS::Region': True,
'AWS::StackId': True,
'AWS::StackName': True}
self.assertEqual(expected, params.map(lambda p: p.has_default()))
def test_map_str(self):
template = {'Parameters': {'Foo': {'Type': 'String'},
'Bar': {'Type': 'Number'},
'Uni': {'Type': 'String'}}}
stack_name = 'test_params'
params = self.new_parameters(stack_name, template,
{'Foo': 'foo',
'Bar': '42',
'Uni': u'test\u2665'})
expected = {'Foo': 'foo',
'Bar': '42',
'Uni': b'test\xe2\x99\xa5',
'AWS::Region': 'ap-southeast-1',
'AWS::StackId':
'arn:openstack:heat:::stacks/{0}/{1}'.format(
stack_name,
'None'),
'AWS::StackName': 'test_params'}
mapped_params = params.map(six.text_type)
mapped_params['Uni'] = mapped_params['Uni'].encode('utf-8')
self.assertEqual(expected, mapped_params)
def test_unknown_params(self):
user_params = {'Foo': 'wibble'}
self.assertRaises(exception.UnknownUserParameter,
self.new_parameters,
'test',
params_schema,
user_params)
def test_missing_params(self):
user_params = {}
self.assertRaises(exception.UserParameterMissing,
self.new_parameters,
'test',
params_schema,
user_params)
def test_missing_attribute_params(self):
params = {'Parameters': {'Foo': {'Type': 'String'},
'NoAttr': 'No attribute.',
'Bar': {'Type': 'Number', 'Default': '1'}}}
self.assertRaises(exception.InvalidSchemaError,
self.new_parameters,
'test',
params)
class ParameterDefaultsTest(ParametersBase):
scenarios = [
('type_list', dict(p_type='CommaDelimitedList',
value='1,1,1',
expected=[['4', '2'], ['7', '7'], ['1', '1', '1']],
param_default='7,7',
default='4,2')),
('type_number', dict(p_type='Number',
value=111,
expected=[42, 77, 111],
param_default=77,
default=42)),
('type_string', dict(p_type='String',
value='111',
expected=['42', '77', '111'],
param_default='77',
default='42')),
('type_json', dict(p_type='Json',
value={'1': '11'},
expected=[{'4': '2'}, {'7': '7'}, {'1': '11'}],
param_default={'7': '7'},
default={'4': '2'})),
('type_boolean1', dict(p_type='Boolean',
value=True,
expected=[False, False, True],
param_default=False,
default=False)),
('type_boolean2', dict(p_type='Boolean',
value=False,
expected=[False, True, False],
param_default=True,
default=False)),
('type_boolean3', dict(p_type='Boolean',
value=False,
expected=[True, False, False],
param_default=False,
default=True))]
def test_use_expected_default(self):
template = {'Parameters': {'a': {'Type': self.p_type,
'Default': self.default}}}
params = self.new_parameters('test_params', template)
self.assertEqual(self.expected[0], params['a'])
params = self.new_parameters('test_params', template,
param_defaults={'a': self.param_default})
self.assertEqual(self.expected[1], params['a'])
params = self.new_parameters('test_params', template,
{'a': self.value},
param_defaults={'a': self.param_default})
self.assertEqual(self.expected[2], params['a'])
class ParameterSchemaTest(common.HeatTestCase):
def test_validate_schema_wrong_key(self):
error = self.assertRaises(exception.InvalidSchemaError,
parameters.Schema.from_dict, 'param_name',
{"foo": "bar"})
self.assertEqual("Invalid key 'foo' for parameter (param_name)",
six.text_type(error))
def test_validate_schema_no_type(self):
error = self.assertRaises(exception.InvalidSchemaError,
parameters.Schema.from_dict,
'broken',
{"Description": "Hi!"})
self.assertEqual("Missing parameter type for parameter: broken",
six.text_type(error))
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cache_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._cachemaxmemorykb = 0
self._cacherecentpercentsuccessfulrevalidation = 0
self._cacherecentpercentstoreablemiss = 0
self._cacherecentpercentparameterizedhits = 0
self._cacherecentpercentoriginbandwidthsaved = 0
self._cacherecentpercenthit = 0
self._cacherecentpercentbytehit = 0
self._cacherecentpercent304hits = 0
self._cacheutilizedmemorykb = 0
self._cachemaxmemoryactivekb = 0
self._cache64maxmemorykb = 0
self._cachepercentpethits = 0
self._cachetotpethits = 0
self._cachepethitsrate = 0
self._cachepercentparameterized304hits = 0
self._cachetotparameterizedhits = 0
self._cacheparameterizedhitsrate = 0
self._cachepercentsuccessfulrevalidation = 0
self._cachepercentstoreablemiss = 0
self._cachetotfulltoconditionalrequest = 0
self._cachefulltoconditionalrequestrate = 0
self._cachetotsuccessfulrevalidation = 0
self._cachesuccessfulrevalidationrate = 0
self._cachetotrevalidationmiss = 0
self._cacherevalidationmissrate = 0
self._cachetotnonstoreablemisses = 0
self._cachenonstoreablemissesrate = 0
self._cachetotstoreablemisses = 0
self._cachestoreablemissesrate = 0
self._cachecompressedbytesserved = 0
self._cachecompressedbytesservedrate = 0
self._cachepercentbytehit = 0
self._cachebytesserved = 0
self._cachebytesservedrate = 0
self._cachetotresponsebytes = 0
self._cacheresponsebytesrate = 0
self._cachepercent304hits = 0
self._cachenummarker = 0
self._cachepercentoriginbandwidthsaved = 0
self._cachepercenthit = 0
self._cachetotmisses = 0
self._cachemissesrate = 0
self._cachetothits = 0
self._cachehitsrate = 0
self._cachetotrequests = 0
self._cacherequestsrate = 0
self._cachenumcached = 0
self._cachecurhits = 0
self._cachecurmisses = 0
self._cachetotnon304hits = 0
self._cachenon304hitsrate = 0
self._cachetot304hits = 0
self._cache304hitsrate = 0
self._cachetotsqlhits = 0
self._cachesqlhitsrate = 0
self._cachetotexpireatlastbyte = 0
self._cacheexpireatlastbyterate = 0
self._cachetotflashcachemisses = 0
self._cacheflashcachemissesrate = 0
self._cachetotflashcachehits = 0
self._cacheflashcachehitsrate = 0
self._cachetotparameterizedinvalidationrequests = 0
self._cacheparameterizedinvalidationrequestsrate = 0
self._cachetotnonparameterizedinvalidationrequests = 0
self._cachenonparameterizedinvalidationrequestsrate = 0
self._cachetotinvalidationrequests = 0
self._cacheinvalidationrequestsrate = 0
self._cachetotparameterizedrequests = 0
self._cacheparameterizedrequestsrate = 0
self._cachetotparameterizednon304hits = 0
self._cacheparameterizednon304hitsrate = 0
self._cachetotparameterized304hits = 0
self._cacheparameterized304hitsrate = 0
self._cachetotpetrequests = 0
self._cachepetrequestsrate = 0
self._cacheerrmemalloc = 0
self._cachelargestresponsereceived = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def cachehitsrate(self) :
"""Rate (/s) counter for cachetothits.
"""
try :
return self._cachehitsrate
except Exception as e:
raise e
@property
def cachepethitsrate(self) :
"""Rate (/s) counter for cachetotpethits.
"""
try :
return self._cachepethitsrate
except Exception as e:
raise e
@property
def cachetotexpireatlastbyte(self) :
"""Instances of content expiring immediately after receiving the last body byte due to the Expire at Last Byte setting for the content group.
"""
try :
return self._cachetotexpireatlastbyte
except Exception as e:
raise e
@property
def cachenummarker(self) :
"""Marker objects created when a response exceeds the maximum or minimum size for entries in its content group or has not yet received the minimum number of hits required for items in its content group.
"""
try :
return self._cachenummarker
except Exception as e:
raise e
@property
def cachecurhits(self) :
"""This number should be close to the number of hits being served currently.
"""
try :
return self._cachecurhits
except Exception as e:
raise e
@property
def cacheresponsebytesrate(self) :
"""Rate (/s) counter for cachetotresponsebytes.
"""
try :
return self._cacheresponsebytesrate
except Exception as e:
raise e
@property
def cacheutilizedmemorykb(self) :
"""Amount of memory the integrated cache is currently using.
"""
try :
return self._cacheutilizedmemorykb
except Exception as e:
raise e
@property
def cachetotnonparameterizedinvalidationrequests(self) :
"""Requests that match an invalidation policy where the invalGroups parameter is configured and expires one or more content groups.
"""
try :
return self._cachetotnonparameterizedinvalidationrequests
except Exception as e:
raise e
@property
def cachepercentpethits(self) :
"""Percentage of cache hits in content groups that have Poll Every Time enabled, relative to all searches of content groups with Poll Every Time enabled. .
"""
try :
return self._cachepercentpethits
except Exception as e:
raise e
@property
def cachebytesserved(self) :
"""Total number of bytes served from the integrated cache.
"""
try :
return self._cachebytesserved
except Exception as e:
raise e
@property
def cachetotparameterizednon304hits(self) :
"""Parameterized requests resulting in a full response (not status code 304: Object Not Updated) served from the cache.
"""
try :
return self._cachetotparameterizednon304hits
except Exception as e:
raise e
@property
def cachebytesservedrate(self) :
"""Rate (/s) counter for cachebytesserved.
"""
try :
return self._cachebytesservedrate
except Exception as e:
raise e
@property
def cachepercent304hits(self) :
"""304 responses as a percentage of all responses that the NetScaler served.
"""
try :
return self._cachepercent304hits
except Exception as e:
raise e
@property
def cachepercentbytehit(self) :
"""Bytes served from the cache divided by total bytes served to the client. If compression is On in the NetScaler, this ratio may not reflect the bytes served by the compression module. If the compression is Off, this ratio is the same as cachePercentOriginBandwidthSaved.
"""
try :
return self._cachepercentbytehit
except Exception as e:
raise e
@property
def cachepercentstoreablemiss(self) :
"""Responses that were fetched from the origin, stored in the cache, and then served to the client, as a percentage of all cache misses.
"""
try :
return self._cachepercentstoreablemiss
except Exception as e:
raise e
@property
def cachetotpetrequests(self) :
"""Requests that triggered a search of a content group that has Poll Every Time (PET) enabled (always consult the origin server before serving cached data).
"""
try :
return self._cachetotpetrequests
except Exception as e:
raise e
@property
def cachecompressedbytesserved(self) :
"""Number of compressed bytes served from the cache.
"""
try :
return self._cachecompressedbytesserved
except Exception as e:
raise e
@property
def cachetotresponsebytes(self) :
"""Total number of HTTP response bytes served by NetScaler from both the origin and the cache.
"""
try :
return self._cachetotresponsebytes
except Exception as e:
raise e
@property
def cachelargestresponsereceived(self) :
"""Size, in bytes, of largest response sent to client from the cache or the origin server.
"""
try :
return self._cachelargestresponsereceived
except Exception as e:
raise e
@property
def cachenonparameterizedinvalidationrequestsrate(self) :
"""Rate (/s) counter for cachetotnonparameterizedinvalidationrequests.
"""
try :
return self._cachenonparameterizedinvalidationrequestsrate
except Exception as e:
raise e
@property
def cacherecentpercentbytehit(self) :
"""Recently recorded cache byte hit ratio expressed as percentage. Here we define byte hit ratio as ((number of bytes served from the cache)/(total number of bytes served to the client)). This is the standard definition of Byte Hit Ratio. If compression is turned ON in NS then this ratio doesn't mean much. This might under or over estimate the origin-to-cache bandwidth saving (depending upon whether bytes served by CMP in NetScaler are more or less than compressed bytes served from the cache). If CMP is turned OFF in NS then this ratio is same as cacheRecentPercentOriginBandwidthSaved.
"""
try :
return self._cacherecentpercentbytehit
except Exception as e:
raise e
@property
def cacherecentpercentsuccessfulrevalidation(self) :
"""Recently recorded percentage of times stored content was successfully revalidated by a 304 response rather than by a full response.
"""
try :
return self._cacherecentpercentsuccessfulrevalidation
except Exception as e:
raise e
@property
def cachesuccessfulrevalidationrate(self) :
"""Rate (/s) counter for cachetotsuccessfulrevalidation.
"""
try :
return self._cachesuccessfulrevalidationrate
except Exception as e:
raise e
@property
def cachesqlhitsrate(self) :
"""Rate (/s) counter for cachetotsqlhits.
"""
try :
return self._cachesqlhitsrate
except Exception as e:
raise e
@property
def cacherequestsrate(self) :
"""Rate (/s) counter for cachetotrequests.
"""
try :
return self._cacherequestsrate
except Exception as e:
raise e
@property
def cachemissesrate(self) :
"""Rate (/s) counter for cachetotmisses.
"""
try :
return self._cachemissesrate
except Exception as e:
raise e
@property
def cachemaxmemorykb(self) :
"""Largest amount of memory the NetScaler can dedicate to caching, up to 50% of available memory. A 0 value disables caching, but the caching module continues to run. .
"""
try :
return self._cachemaxmemorykb
except Exception as e:
raise e
@property
def cachenumcached(self) :
"""Responses currently in integrated cache. Includes responses fully downloaded, in the process of being downloaded, and expired or flushed but not yet removed.
"""
try :
return self._cachenumcached
except Exception as e:
raise e
@property
def cachetothits(self) :
"""Responses served from the integrated cache. These responses match a policy with a CACHE action.
"""
try :
return self._cachetothits
except Exception as e:
raise e
@property
def cachetotsuccessfulrevalidation(self) :
"""Total number of times stored content was successfully revalidated by a 304 Not Modified response from the origin.
"""
try :
return self._cachetotsuccessfulrevalidation
except Exception as e:
raise e
@property
def cachepercentparameterized304hits(self) :
"""Percentage of parameterized 304 hits relative to all parameterized hits.
"""
try :
return self._cachepercentparameterized304hits
except Exception as e:
raise e
@property
def cacheflashcachehitsrate(self) :
"""Rate (/s) counter for cachetotflashcachehits.
"""
try :
return self._cacheflashcachehitsrate
except Exception as e:
raise e
@property
def cachestoreablemissesrate(self) :
"""Rate (/s) counter for cachetotstoreablemisses.
"""
try :
return self._cachestoreablemissesrate
except Exception as e:
raise e
@property
def cachetotflashcachehits(self) :
"""Number of requests to a content group with flash cache enabled that were cache hits. The flash cache setting queues requests that arrive simultaneously and distributes the response to all the clients in the queue.
"""
try :
return self._cachetotflashcachehits
except Exception as e:
raise e
@property
def cachetotsqlhits(self) :
"""sql response served from cache.
"""
try :
return self._cachetotsqlhits
except Exception as e:
raise e
@property
def cachetotrevalidationmiss(self) :
"""Responses that an intervening cache revalidated with the integrated cache before serving, as determined by a Cache-Control: Max-Age header configurable in the integrated cache.
"""
try :
return self._cachetotrevalidationmiss
except Exception as e:
raise e
@property
def cachemaxmemoryactivekb(self) :
"""Currently active value of maximum memory.
"""
try :
return self._cachemaxmemoryactivekb
except Exception as e:
raise e
@property
def cachetotstoreablemisses(self) :
"""Cache misses for which the fetched response is stored in the cache before serving it to the client. Storable misses conform to a built-in or user-defined caching policy that contains a CACHE action.
"""
try :
return self._cachetotstoreablemisses
except Exception as e:
raise e
@property
def cacheparameterizedhitsrate(self) :
"""Rate (/s) counter for cachetotparameterizedhits.
"""
try :
return self._cacheparameterizedhitsrate
except Exception as e:
raise e
@property
def cacheparameterized304hitsrate(self) :
"""Rate (/s) counter for cachetotparameterized304hits.
"""
try :
return self._cacheparameterized304hitsrate
except Exception as e:
raise e
@property
def cacheexpireatlastbyterate(self) :
"""Rate (/s) counter for cachetotexpireatlastbyte.
"""
try :
return self._cacheexpireatlastbyterate
except Exception as e:
raise e
@property
def cachecurmisses(self) :
"""Responses fetched from the origin and served from the cache. Should approximate storable misses. Does not include non-storable misses.
"""
try :
return self._cachecurmisses
except Exception as e:
raise e
@property
def cacherevalidationmissrate(self) :
"""Rate (/s) counter for cachetotrevalidationmiss.
"""
try :
return self._cacherevalidationmissrate
except Exception as e:
raise e
@property
def cachenon304hitsrate(self) :
"""Rate (/s) counter for cachetotnon304hits.
"""
try :
return self._cachenon304hitsrate
except Exception as e:
raise e
@property
def cachepercentsuccessfulrevalidation(self) :
"""Percentage of times stored content was successfully revalidated by a 304 (Object Not Modifed) response rather than by a full response.
"""
try :
return self._cachepercentsuccessfulrevalidation
except Exception as e:
raise e
@property
def cachetotnon304hits(self) :
"""Total number of full (non-304) responses served from the cache. A 304 status code indicates that a response has not been modified since the last time it was served.
"""
try :
return self._cachetotnon304hits
except Exception as e:
raise e
@property
def cachefulltoconditionalrequestrate(self) :
"""Rate (/s) counter for cachetotfulltoconditionalrequest.
"""
try :
return self._cachefulltoconditionalrequestrate
except Exception as e:
raise e
@property
def cacherecentpercentoriginbandwidthsaved(self) :
"""Bytes served from cache divided by total bytes served to client. This ratio can be greater than 1 because of the assumption that all compression has been done in the NetScaler.
"""
try :
return self._cacherecentpercentoriginbandwidthsaved
except Exception as e:
raise e
@property
def cacherecentpercentparameterizedhits(self) :
"""Recently recorded ratio of parameterized 304 hits to all parameterized hits expressed as a percentage.
"""
try :
return self._cacherecentpercentparameterizedhits
except Exception as e:
raise e
@property
def cachetotparameterizedinvalidationrequests(self) :
"""Requests matching a policy with an invalidation (INVAL) action and a content group that uses an invalidation selector or parameters.
"""
try :
return self._cachetotparameterizedinvalidationrequests
except Exception as e:
raise e
@property
def cachetotflashcachemisses(self) :
"""Number of requests to a content group with flash cache enabled that were cache misses. Flash cache distributes the response to all the clients in aqueue.
"""
try :
return self._cachetotflashcachemisses
except Exception as e:
raise e
@property
def cacheparameterizedrequestsrate(self) :
"""Rate (/s) counter for cachetotparameterizedrequests.
"""
try :
return self._cacheparameterizedrequestsrate
except Exception as e:
raise e
@property
def cacheerrmemalloc(self) :
"""Total number of times the cache failed to allocate memory to store responses.
"""
try :
return self._cacheerrmemalloc
except Exception as e:
raise e
@property
def cacherecentpercenthit(self) :
"""Recently recorded cache hit ratio expressed as percentage.
"""
try :
return self._cacherecentpercenthit
except Exception as e:
raise e
@property
def cachecompressedbytesservedrate(self) :
"""Rate (/s) counter for cachecompressedbytesserved.
"""
try :
return self._cachecompressedbytesservedrate
except Exception as e:
raise e
@property
def cache304hitsrate(self) :
"""Rate (/s) counter for cachetot304hits.
"""
try :
return self._cache304hitsrate
except Exception as e:
raise e
@property
def cachepercenthit(self) :
"""Cache hits as percentage of the total number of requests.
"""
try :
return self._cachepercenthit
except Exception as e:
raise e
@property
def cachenonstoreablemissesrate(self) :
"""Rate (/s) counter for cachetotnonstoreablemisses.
"""
try :
return self._cachenonstoreablemissesrate
except Exception as e:
raise e
@property
def cachetotinvalidationrequests(self) :
"""Requests that match an invalidation policy and result in expiration of specific cached responses or entire content groups.
"""
try :
return self._cachetotinvalidationrequests
except Exception as e:
raise e
@property
def cache64maxmemorykb(self) :
"""Largest amount of memory the NetScaler can dedicate to caching, up to 50% of available memory. A 0 value disables caching, but the caching module continues to run. .
"""
try :
return self._cache64maxmemorykb
except Exception as e:
raise e
@property
def cachetot304hits(self) :
"""Object not modified responses served from the cache. (Status code 304 served instead of the full response.).
"""
try :
return self._cachetot304hits
except Exception as e:
raise e
@property
def cachetotfulltoconditionalrequest(self) :
"""Number of user-agent requests for a cached Poll Every Time (PET) response that were sent to the origin server as conditional requests. .
"""
try :
return self._cachetotfulltoconditionalrequest
except Exception as e:
raise e
@property
def cachetotparameterizedhits(self) :
"""Parameterized requests resulting in either a 304 or non-304 hit.
"""
try :
return self._cachetotparameterizedhits
except Exception as e:
raise e
@property
def cachepetrequestsrate(self) :
"""Rate (/s) counter for cachetotpetrequests.
"""
try :
return self._cachepetrequestsrate
except Exception as e:
raise e
@property
def cacherecentpercent304hits(self) :
"""Recently recorded ratio of 304 hits to all hits expressed as percentage.
"""
try :
return self._cacherecentpercent304hits
except Exception as e:
raise e
@property
def cachetotnonstoreablemisses(self) :
"""Cache misses for which the fetched response is not stored in the cache. These responses match policies with a NOCACHE action or are affected by Poll Every Time.
"""
try :
return self._cachetotnonstoreablemisses
except Exception as e:
raise e
@property
def cachetotparameterizedrequests(self) :
"""Total number of requests where the content group has hit and invalidation parameters or selectors.
"""
try :
return self._cachetotparameterizedrequests
except Exception as e:
raise e
@property
def cachepercentoriginbandwidthsaved(self) :
"""Percentage of origin bandwidth saved, expressed as number of bytes served from the integrated cache divided by all bytes served. The assumption is that all compression is done in the NetScaler.
"""
try :
return self._cachepercentoriginbandwidthsaved
except Exception as e:
raise e
@property
def cachetotrequests(self) :
"""Total cache hits plus total cache misses.
"""
try :
return self._cachetotrequests
except Exception as e:
raise e
@property
def cacheparameterizednon304hitsrate(self) :
"""Rate (/s) counter for cachetotparameterizednon304hits.
"""
try :
return self._cacheparameterizednon304hitsrate
except Exception as e:
raise e
@property
def cachetotmisses(self) :
"""Intercepted HTTP requests requiring fetches from origin server.
"""
try :
return self._cachetotmisses
except Exception as e:
raise e
@property
def cachetotpethits(self) :
"""Number of times a cache hit was found during a search of a content group that has Poll Every Time enabled.
"""
try :
return self._cachetotpethits
except Exception as e:
raise e
@property
def cacheparameterizedinvalidationrequestsrate(self) :
"""Rate (/s) counter for cachetotparameterizedinvalidationrequests.
"""
try :
return self._cacheparameterizedinvalidationrequestsrate
except Exception as e:
raise e
@property
def cacherecentpercentstoreablemiss(self) :
"""Recently recorded ratio of store-able misses to all misses expressed as percentage.
"""
try :
return self._cacherecentpercentstoreablemiss
except Exception as e:
raise e
@property
def cacheflashcachemissesrate(self) :
"""Rate (/s) counter for cachetotflashcachemisses.
"""
try :
return self._cacheflashcachemissesrate
except Exception as e:
raise e
@property
def cacheinvalidationrequestsrate(self) :
"""Rate (/s) counter for cachetotinvalidationrequests.
"""
try :
return self._cacheinvalidationrequestsrate
except Exception as e:
raise e
@property
def cachetotparameterized304hits(self) :
"""Parameterized requests resulting in an object not modified (status code 304) response. .
"""
try :
return self._cachetotparameterized304hits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cache_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cache
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all cache_stats resources that are configured on netscaler.
"""
try :
obj = cache_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class cache_response(base_response) :
def __init__(self, length=1) :
self.cache = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cache = [cache_stats() for _ in range(length)]
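# Illustrative usage sketch (comments only, not part of the generated SDK
# file; the address and credentials are placeholders). With a logged-in
# nitro_service client, the statistics are fetched through the classmethod
# defined above, roughly:
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("192.0.2.10", "http")
#   client.login("nsroot", "password")
#   stats = cache_stats.get(client)  # expected to yield cache_stats object(s)
#   print(stats[0].cachetothits, stats[0].cachehitsrate)
#   client.logout()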
#!/usr/bin/env python3
#
# Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# A script to kill hanging processes. The tool will return non-zero if any
# process was actually found.
#
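# Example invocation (illustrative; flag names match the options defined in
# GetOptions below):
#   python task_kill.py --kill_dart=True --kill_browsers=True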
import optparse
import os
import signal
import subprocess
import sys
import utils
os_name = utils.GuessOS()
POSIX_INFO = 'ps -p %s -o args'
EXECUTABLE_NAMES = {
'win32': {
'chrome': 'chrome.exe',
'dart': 'dart.exe',
'dart_precompiled_runtime': 'dart_precompiled_runtime.exe',
'firefox': 'firefox.exe',
'gen_snapshot': 'gen_snapshot.exe',
'git': 'git.exe',
'iexplore': 'iexplore.exe',
'vctip': 'vctip.exe',
'mspdbsrv': 'mspdbsrv.exe',
},
'linux': {
'chrome': 'chrome',
'dart': 'dart',
'dart_precompiled_runtime': 'dart_precompiled_runtime',
'firefox': 'firefox',
'gen_snapshot': 'gen_snapshot',
'flutter_tester': 'flutter_tester',
'git': 'git',
},
'macos': {
'chrome': 'Chrome',
'chrome_helper': 'Chrome Helper',
'dart': 'dart',
'dart_precompiled_runtime': 'dart_precompiled_runtime',
'firefox': 'firefox',
'gen_snapshot': 'gen_snapshot',
'git': 'git',
'safari': 'Safari',
}
}
INFO_COMMAND = {
'win32': 'wmic process where Processid=%s get CommandLine',
'macos': POSIX_INFO,
'linux': POSIX_INFO,
}
STACK_INFO_COMMAND = {
'win32': None,
'macos': '/usr/bin/sample %s 1 4000 -mayDie',
'linux': '/usr/bin/eu-stack -p %s',
}
def GetOptions():
parser = optparse.OptionParser('usage: %prog [options]')
true_or_false = ['True', 'False']
parser.add_option(
"--kill_dart",
default='True',
type='choice',
choices=true_or_false,
help="Kill all dart processes")
parser.add_option(
"--kill_vc",
default='True',
type='choice',
choices=true_or_false,
help="Kill all git processes")
parser.add_option(
"--kill_vsbuild",
default='False',
type='choice',
choices=true_or_false,
help="Kill all visual studio build related processes")
parser.add_option(
"--kill_browsers",
default='False',
type='choice',
choices=true_or_false,
help="Kill all browser processes")
(options, args) = parser.parse_args()
return options
def GetPidsPosix(process_name):
# This is to have only one posix command; on linux we could just do:
# pidof process_name
cmd = 'ps -e -o pid= -o comm='
# Sample output:
# 1 /sbin/launchd
# 80943 /Applications/Safari.app/Contents/MacOS/Safari
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
output, stderr = p.communicate()
results = []
lines = output.splitlines()
for line in lines:
split = line.split()
# On mac this ps command actually gives us the full path to non-system
# binaries.
if len(split) >= 2 and " ".join(split[1:]).endswith(process_name):
results.append(split[0])
return results
def GetPidsWindows(process_name):
cmd = 'tasklist /FI "IMAGENAME eq %s" /NH' % process_name
# Sample output:
# dart.exe 4356 Console 1 6,800 K
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
output, stderr = p.communicate()
results = []
lines = output.splitlines()
for line in lines:
split = line.split()
if len(split) > 2 and split[0] == process_name:
results.append(split[1])
return results
def GetPids(process_name):
if os_name == "win32":
return GetPidsWindows(process_name)
else:
return GetPidsPosix(process_name)
def PrintPidStackInfo(pid):
command_pattern = STACK_INFO_COMMAND.get(os_name, False)
if command_pattern:
p = subprocess.Popen(command_pattern % pid,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
stdout = stdout.splitlines()
stderr = stderr.splitlines()
print(" Stack:")
for line in stdout:
print(" %s" % line)
if stderr:
print(" Stack (stderr):")
for line in stderr:
print(" %s" % line)
def PrintPidInfo(pid, dump_stacks):
# We assume that the list command will return lines in the format:
# EXECUTABLE_PATH ARGS
# There may be blank strings in the output
p = subprocess.Popen(INFO_COMMAND[os_name] % pid,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
output, stderr = p.communicate()
lines = output.splitlines()
# Pop the header
lines.pop(0)
print("Hanging process info:")
print(" PID: %s" % pid)
for line in lines:
# wmic will output a bunch of empty strings, which we ignore
if line: print(" Command line: %s" % line)
if dump_stacks:
PrintPidStackInfo(pid)
def KillPosix(pid):
try:
os.kill(int(pid), signal.SIGKILL)
except:
# Ignore this, the process is already dead from killing another process.
pass
def KillWindows(pid):
# os.kill is not available until python 2.7
cmd = "taskkill /F /PID %s" % pid
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
p.communicate()
def Kill(name, dump_stacks=False):
if name not in EXECUTABLE_NAMES[os_name]:
return 0
print("***************** Killing %s *****************" % name)
platform_name = EXECUTABLE_NAMES[os_name][name]
pids = GetPids(platform_name)
for pid in pids:
PrintPidInfo(pid, dump_stacks)
if os_name == "win32":
KillWindows(pid)
else:
KillPosix(pid)
print("Killed pid: %s" % pid)
if len(pids) == 0:
print(" No %s processes found." % name)
return len(pids)
def KillBrowsers():
status = Kill('firefox')
# We don't give error on killing chrome. It happens quite often that the
# browser controller fails in killing chrome, so we silently do it here.
Kill('chrome')
status += Kill('chrome_helper')
status += Kill('iexplore')
status += Kill('safari')
return status
def KillVCSystems():
status = Kill('git')
return status
def KillVSBuild():
status = Kill('vctip')
status += Kill('mspdbsrv')
return status
def KillDart():
status = Kill("dart", dump_stacks=True)
status += Kill("gen_snapshot", dump_stacks=True)
status += Kill("dart_precompiled_runtime", dump_stacks=True)
status += Kill("flutter_tester", dump_stacks=True)
return status
def Main():
options = GetOptions()
status = 0
if options.kill_dart == 'True':
if os_name == "win32":
# TODO(24086): Add result of KillDart into status once pub hang is fixed.
KillDart()
else:
status += KillDart()
if options.kill_vc == 'True':
status += KillVCSystems()
if options.kill_vsbuild == 'True' and os_name == 'win32':
status += KillVSBuild()
if options.kill_browsers == 'True':
status += KillBrowsers()
return status
if __name__ == '__main__':
sys.exit(Main())
import datetime
import decimal
import itertools
import re
import time
import urllib2
import urlparse
import uuid
import warnings
from operator import itemgetter
import gridfs
from bson import Binary, DBRef, SON, ObjectId
from mongoengine.python_support import (PY3, bin_type, txt_type,
str_types, StringIO)
from base import (BaseField, ComplexBaseField, ObjectIdField,
ValidationError, get_document, BaseDocument)
from queryset import DO_NOTHING, QuerySet
from document import Document, EmbeddedDocument
from connection import get_db, DEFAULT_CONNECTION_NAME
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = ['StringField', 'IntField', 'FloatField', 'BooleanField',
'DateTimeField', 'EmbeddedDocumentField', 'ListField', 'DictField',
'ObjectIdField', 'ReferenceField', 'ValidationError', 'MapField',
'DecimalField', 'ComplexDateTimeField', 'URLField', 'DynamicField',
'GenericReferenceField', 'FileField', 'BinaryField',
'SortedListField', 'EmailField', 'GeoPointField', 'ImageField',
'SequenceField', 'UUIDField', 'GenericEmbeddedDocumentField']
RECURSIVE_REFERENCE_CONSTANT = 'self'
class StringField(BaseField):
"""A unicode string field.
"""
def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
self.regex = re.compile(regex) if regex else None
self.max_length = max_length
self.min_length = min_length
super(StringField, self).__init__(**kwargs)
def to_python(self, value):
if isinstance(value, unicode):
return value
try:
value = value.decode('utf-8')
except:
pass
return value
def validate(self, value):
if not isinstance(value, basestring):
self.error('StringField only accepts string values')
if self.max_length is not None and len(value) > self.max_length:
self.error('String value is too long')
if self.min_length is not None and len(value) < self.min_length:
self.error('String value is too short')
if self.regex is not None and self.regex.match(value) is None:
self.error('String value did not match validation regex')
def lookup_member(self, member_name):
return None
def prepare_query_value(self, op, value):
if not isinstance(op, basestring):
return value
if op.lstrip('i') in ('startswith', 'endswith', 'contains', 'exact'):
flags = 0
if op.startswith('i'):
flags = re.IGNORECASE
op = op.lstrip('i')
regex = r'%s'
if op == 'startswith':
regex = r'^%s'
elif op == 'endswith':
regex = r'%s$'
elif op == 'exact':
regex = r'^%s$'
# escape unsafe characters which could lead to a re.error
value = re.escape(value)
value = re.compile(regex % value, flags)
return value
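# Illustrative sketch of the mapping above (comment only; the values are made
# up for the example): prepare_query_value('istartswith', 'Ab') is expected to
# return a pattern equivalent to re.compile(r'^Ab', re.IGNORECASE), while
# prepare_query_value('exact', 'a.b') escapes the dot and anchors both ends,
# i.e. re.compile(r'^a\.b$').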
class URLField(StringField):
"""A field that validates input as an URL.
.. versionadded:: 0.3
"""
_URL_REGEX = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def __init__(self, verify_exists=False, url_regex=None, **kwargs):
self.verify_exists = verify_exists
self.url_regex = url_regex or self._URL_REGEX
super(URLField, self).__init__(**kwargs)
def validate(self, value):
if not self.url_regex.match(value):
self.error('Invalid URL: %s' % value)
return
if self.verify_exists:
warnings.warn(
"The URLField verify_exists argument has intractable security "
"and performance issues. Accordingly, it has been deprecated.",
DeprecationWarning
)
try:
request = urllib2.Request(value)
urllib2.urlopen(request)
except Exception, e:
self.error('This URL appears to be a broken link: %s' % e)
class EmailField(StringField):
"""A field that validates input as an E-Mail-Address.
.. versionadded:: 0.4
"""
EMAIL_REGEX = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE # domain
)
def validate(self, value):
if not EmailField.EMAIL_REGEX.match(value):
self.error('Invalid Mail-address: %s' % value)
super(EmailField, self).validate(value)
class IntField(BaseField):
"""An integer field.
"""
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(IntField, self).__init__(**kwargs)
def to_python(self, value):
try:
value = int(value)
except ValueError:
pass
return value
def validate(self, value):
try:
value = int(value)
except:
self.error('%s could not be converted to int' % value)
if self.min_value is not None and value < self.min_value:
self.error('Integer value is too small')
if self.max_value is not None and value > self.max_value:
self.error('Integer value is too large')
def prepare_query_value(self, op, value):
if value is None:
return value
return int(value)
class FloatField(BaseField):
"""An floating point number field.
"""
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(FloatField, self).__init__(**kwargs)
def to_python(self, value):
try:
value = float(value)
except ValueError:
pass
return value
def validate(self, value):
if isinstance(value, int):
value = float(value)
if not isinstance(value, float):
self.error('FloatField only accepts float values')
if self.min_value is not None and value < self.min_value:
self.error('Float value is too small')
if self.max_value is not None and value > self.max_value:
self.error('Float value is too large')
def prepare_query_value(self, op, value):
if value is None:
return value
return float(value)
class DecimalField(BaseField):
"""A fixed-point decimal number field.
.. versionadded:: 0.3
"""
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(DecimalField, self).__init__(**kwargs)
def to_python(self, value):
original_value = value
if not isinstance(value, basestring):
value = unicode(value)
try:
value = decimal.Decimal(value)
except ValueError:
return original_value
return value
def to_mongo(self, value):
return unicode(value)
def validate(self, value):
if not isinstance(value, decimal.Decimal):
if not isinstance(value, basestring):
value = str(value)
try:
value = decimal.Decimal(value)
except Exception, exc:
self.error('Could not convert value to decimal: %s' % exc)
if self.min_value is not None and value < self.min_value:
self.error('Decimal value is too small')
if self.max_value is not None and value > self.max_value:
self.error('Decimal value is too large')
class BooleanField(BaseField):
"""A boolean field type.
.. versionadded:: 0.1.2
"""
def to_python(self, value):
try:
value = bool(value)
except ValueError:
pass
return value
def validate(self, value):
if not isinstance(value, bool):
self.error('BooleanField only accepts boolean values')
class DateTimeField(BaseField):
"""A datetime field.
Note: Microseconds are rounded to the nearest millisecond.
Pre UTC microsecond support is effectively broken.
Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
need accurate microsecond support.
"""
def validate(self, value):
if not isinstance(value, (datetime.datetime, datetime.date)):
self.error(u'cannot parse date "%s"' % value)
def to_mongo(self, value):
return self.prepare_query_value(None, value)
def prepare_query_value(self, op, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
# Attempt to parse a datetime:
# value = smart_str(value)
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
return None
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
**kwargs)
except ValueError:
try: # Try without seconds.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
**kwargs)
except ValueError: # Try without hour/minutes/seconds.
try:
return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
**kwargs)
except ValueError:
return None
class ComplexDateTimeField(StringField):
"""
ComplexDateTimeField handles microseconds exactly instead of rounding
like DateTimeField does.
Derives from a StringField so you can do `gte` and `lte` filtering by
using lexicographical comparison when filtering / sorting strings.
The stored string has the following format:
YYYY,MM,DD,HH,MM,SS,NNNNNN
Where NNNNNN is the number of microseconds of the represented `datetime`.
The `,` as the separator can be easily modified by passing the `separator`
keyword when initializing the field.
.. versionadded:: 0.5
"""
def __init__(self, separator=',', **kwargs):
self.names = ['year', 'month', 'day', 'hour', 'minute', 'second',
'microsecond']
self.separator = separator
super(ComplexDateTimeField, self).__init__(**kwargs)
def _leading_zero(self, number):
"""
Converts the given number to a string.
If it has only one digit, a leading zero is added so that it always has
at least two digits.
"""
if int(number) < 10:
return "0%s" % number
else:
return str(number)
def _convert_from_datetime(self, val):
"""
Convert a `datetime` object to a string representation (which will be
stored in MongoDB). This is the reverse function of
`_convert_from_string`.
>>> a = datetime(2011, 6, 8, 20, 26, 24, 192284)
>>> ComplexDateTimeField()._convert_from_datetime(a)
'2011,06,08,20,26,24,192284'
"""
data = []
for name in self.names:
data.append(self._leading_zero(getattr(val, name)))
return self.separator.join(data)
def _convert_from_string(self, data):
"""
Convert a string representation to a `datetime` object (the object you
will manipulate). This is the reverse function of
`_convert_from_datetime`.
>>> a = '2011,06,08,20,26,24,192284'
>>> ComplexDateTimeField()._convert_from_string(a)
datetime.datetime(2011, 6, 8, 20, 26, 24, 192284)
"""
data = data.split(self.separator)
data = map(int, data)
values = {}
for i in range(7):
values[self.names[i]] = data[i]
return datetime.datetime(**values)
def __get__(self, instance, owner):
data = super(ComplexDateTimeField, self).__get__(instance, owner)
if data is None:
return datetime.datetime.now()
if isinstance(data, datetime.datetime):
return data
return self._convert_from_string(data)
def __set__(self, instance, value):
value = self._convert_from_datetime(value) if value else value
return super(ComplexDateTimeField, self).__set__(instance, value)
def validate(self, value):
if not isinstance(value, datetime.datetime):
self.error('Only datetime objects may be used in a '
'ComplexDateTimeField')
def to_python(self, value):
original_value = value
try:
return self._convert_from_string(value)
except:
return original_value
def to_mongo(self, value):
return self._convert_from_datetime(value)
def prepare_query_value(self, op, value):
return self._convert_from_datetime(value)
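# Hedged usage sketch (not part of this module): how ComplexDateTimeField round-trips a
# datetime through the stored string format described above. ``EventLog`` is a
# hypothetical document class used only for illustration.
#
#     class EventLog(Document):
#         created_at = ComplexDateTimeField()
#
#     log = EventLog(created_at=datetime.datetime(2011, 6, 8, 20, 26, 24, 192284))
#     # Stored in MongoDB as the string '2011,06,08,20,26,24,192284', which keeps the
#     # microseconds and still allows lexicographic gte/lte filtering.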
class EmbeddedDocumentField(BaseField):
"""An embedded document field - with a declared document_type.
Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
"""
def __init__(self, document_type, **kwargs):
if not isinstance(document_type, basestring):
if not issubclass(document_type, EmbeddedDocument):
self.error('Invalid embedded document class provided to an '
'EmbeddedDocumentField')
self.document_type_obj = document_type
super(EmbeddedDocumentField, self).__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, basestring):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
def to_python(self, value):
if not isinstance(value, self.document_type):
return self.document_type._from_son(value)
return value
def to_mongo(self, value):
if not isinstance(value, self.document_type):
return value
return self.document_type.to_mongo(value)
def validate(self, value):
"""Make sure that the document instance is an instance of the
EmbeddedDocument subclass provided when the document was defined.
"""
# Using isinstance also works for subclasses of self.document
if not isinstance(value, self.document_type):
self.error('Invalid embedded document instance provided to an '
'EmbeddedDocumentField')
self.document_type.validate(value)
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
def prepare_query_value(self, op, value):
return self.to_mongo(value)
class GenericEmbeddedDocumentField(BaseField):
"""A generic embedded document field - allows any
:class:`~mongoengine.EmbeddedDocument` to be stored.
Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
.. note ::
You can use the choices param to limit the acceptable
EmbeddedDocument types
"""
def prepare_query_value(self, op, value):
return self.to_mongo(value)
def to_python(self, value):
if isinstance(value, dict):
doc_cls = get_document(value['_cls'])
value = doc_cls._from_son(value)
return value
def validate(self, value):
if not isinstance(value, EmbeddedDocument):
self.error('Invalid embedded document instance provided to a '
'GenericEmbeddedDocumentField')
value.validate()
def to_mongo(self, document):
if document is None:
return None
data = document.to_mongo()
if '_cls' not in data:
data['_cls'] = document._class_name
return data
class DynamicField(BaseField):
"""A truly dynamic field type capable of handling different and varying
types of data.
Used by :class:`~mongoengine.DynamicDocument` to handle dynamic data"""
def to_mongo(self, value):
"""Convert a Python type to a MongoDBcompatible type.
"""
if isinstance(value, basestring):
return value
if hasattr(value, 'to_mongo'):
return value.to_mongo()
if not isinstance(value, (dict, list, tuple)):
return value
is_list = False
if not hasattr(value, 'items'):
is_list = True
value = dict([(k, v) for k, v in enumerate(value)])
data = {}
for k, v in value.items():
data[k] = self.to_mongo(v)
if is_list: # Convert back to a list
value = [v for k, v in sorted(data.items(), key=itemgetter(0))]
else:
value = data
return value
def lookup_member(self, member_name):
return member_name
def prepare_query_value(self, op, value):
if isinstance(value, basestring):
from mongoengine.fields import StringField
return StringField().prepare_query_value(op, value)
return self.to_mongo(value)
class ListField(ComplexBaseField):
"""A list field that wraps a standard field, allowing multiple instances
of the field to be used as a list in the database.
If using with ReferenceFields see: :ref:`one-to-many-with-listfields`
.. note::
Required means it cannot be empty - as the default for ListFields is []
"""
# ListFields cannot be indexed with _types - MongoDB doesn't support this
_index_with_types = False
def __init__(self, field=None, **kwargs):
self.field = field
kwargs.setdefault('default', lambda: [])
super(ListField, self).__init__(**kwargs)
def validate(self, value):
"""Make sure that a list of valid fields is being used.
"""
if (not isinstance(value, (list, tuple, QuerySet)) or
isinstance(value, basestring)):
self.error('Only lists and tuples may be used in a list field')
super(ListField, self).validate(value)
def prepare_query_value(self, op, value):
if self.field:
if op in ('set', 'unset') and (not isinstance(value, basestring)
and not isinstance(value, BaseDocument)
and hasattr(value, '__iter__')):
return [self.field.prepare_query_value(op, v) for v in value]
return self.field.prepare_query_value(op, value)
return super(ListField, self).prepare_query_value(op, value)
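# Hedged usage sketch: ListField wraps another field instance, validating each element
# with it. ``BlogPost`` is illustrative only and assumes a configured connection.
#
#     class BlogPost(Document):
#         tags = ListField(StringField(max_length=50))
#
#     BlogPost(tags=['mongodb', 'gridfs']).save()
#     BlogPost.objects(tags='mongodb')   # matches documents whose tags list contains 'mongodb'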
class SortedListField(ListField):
"""A ListField that sorts the contents of its list before writing to
the database in order to ensure that a sorted list is always
retrieved.
.. warning::
There is a potential race condition when handling lists. If you set /
save the whole list then other processes trying to save the whole list
as well could overwrite changes. The safest way to append to a list is
to perform a push operation.
.. versionadded:: 0.4
.. versionchanged:: 0.6 - added reverse keyword
"""
_ordering = None
_order_reverse = False
def __init__(self, field, **kwargs):
if 'ordering' in kwargs.keys():
self._ordering = kwargs.pop('ordering')
if 'reverse' in kwargs.keys():
self._order_reverse = kwargs.pop('reverse')
super(SortedListField, self).__init__(field, **kwargs)
def to_mongo(self, value):
value = super(SortedListField, self).to_mongo(value)
if self._ordering is not None:
return sorted(value, key=itemgetter(self._ordering), reverse=self._order_reverse)
return sorted(value, reverse=self._order_reverse)
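# Hedged usage sketch of the ordering/reverse keywords handled above. ``Page`` and
# ``Comment`` are hypothetical documents; 'created' is the key used to sort.
#
#     class Page(Document):
#         comments = SortedListField(EmbeddedDocumentField(Comment),
#                                    ordering='created', reverse=True)
#     # The list is re-sorted by each comment's 'created' value (newest first)
#     # every time the document is written to MongoDB.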
class DictField(ComplexBaseField):
"""A dictionary field that wraps a standard Python dictionary. This is
similar to an embedded document, but the structure is not defined.
.. note::
Required means it cannot be empty - as the default for DictFields is {}
.. versionadded:: 0.3
.. versionchanged:: 0.5 - Can now handle complex / varying types of data
"""
def __init__(self, basecls=None, field=None, *args, **kwargs):
self.field = field
self.basecls = basecls or BaseField
if not issubclass(self.basecls, BaseField):
self.error('DictField basecls must be a subclass of BaseField')
kwargs.setdefault('default', lambda: {})
super(DictField, self).__init__(*args, **kwargs)
def validate(self, value):
"""Make sure that a list of valid fields is being used.
"""
if not isinstance(value, dict):
self.error('Only dictionaries may be used in a DictField')
if any(k for k in value.keys() if not isinstance(k, basestring)):
self.error('Invalid dictionary key - documents must have only string keys')
if any(('.' in k or '$' in k) for k in value.keys()):
self.error('Invalid dictionary key name - keys may not contain "."'
' or "$" characters')
super(DictField, self).validate(value)
def lookup_member(self, member_name):
return DictField(basecls=self.basecls, db_field=member_name)
def prepare_query_value(self, op, value):
match_operators = ['contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith',
'exact', 'iexact']
if op in match_operators and isinstance(value, basestring):
return StringField().prepare_query_value(op, value)
return super(DictField, self).prepare_query_value(op, value)
class MapField(DictField):
"""A field that maps a name to a specified field type. Similar to
a DictField, except the 'value' of each item must match the specified
field type.
.. versionadded:: 0.5
"""
def __init__(self, field=None, *args, **kwargs):
if not isinstance(field, BaseField):
self.error('Argument to MapField constructor must be a valid '
'field')
super(MapField, self).__init__(field=field, *args, **kwargs)
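# Hedged usage sketch: MapField constrains the *values* of a dictionary to a single
# field type, whereas a plain DictField accepts arbitrary values. ``WikiPage`` is
# illustrative only.
#
#     class WikiPage(Document):
#         metadata = DictField()                   # free-form values
#         translations = MapField(StringField())   # every value must be a string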
class ReferenceField(BaseField):
"""A reference to a document that will be automatically dereferenced on
access (lazily).
Use the `reverse_delete_rule` to handle what should happen if the document
the field is referencing is deleted. EmbeddedDocuments, DictFields and
MapFields do not support reverse_delete_rules and an `InvalidDocumentError`
will be raised if trying to set on one of these Document / Field types.
The options are:
* DO_NOTHING - don't do anything (default).
* NULLIFY - Updates the reference to null.
* CASCADE - Deletes the documents associated with the reference.
* DENY - Prevent the deletion of the reference object.
* PULL - Pull the reference from a :class:`~mongoengine.ListField`
of references
Alternative syntax for registering delete rules (useful when implementing
bi-directional delete rules)
.. code-block:: python
class Bar(Document):
content = StringField()
foo = ReferenceField('Foo')
Bar.register_delete_rule(Foo, 'bar', NULLIFY)
.. note ::
`reverse_delete_rules` do not trigger pre / post delete signals to be
triggered.
.. versionchanged:: 0.5 added `reverse_delete_rule`
"""
def __init__(self, document_type, dbref=None,
reverse_delete_rule=DO_NOTHING, **kwargs):
"""Initialises the Reference Field.
:param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
or as the :class:`~pymongo.objectid.ObjectId`.id.
:param reverse_delete_rule: Determines what to do when the referring
object is deleted
"""
if not isinstance(document_type, basestring):
if not issubclass(document_type, (Document, basestring)):
self.error('Argument to ReferenceField constructor must be a '
'document class or a string')
if dbref is None:
msg = ("ReferenceFields will default to using ObjectId "
" strings in 0.8, set DBRef=True if this isn't desired")
warnings.warn(msg, FutureWarning)
self.dbref = dbref if dbref is not None else True # To change in 0.8
self.document_type_obj = document_type
self.reverse_delete_rule = reverse_delete_rule
super(ReferenceField, self).__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, basestring):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
def __get__(self, instance, owner):
"""Descriptor to allow lazy dereferencing.
"""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
value = instance._data.get(self.name)
# Dereference DBRefs
if isinstance(value, DBRef):
value = self.document_type._get_db().dereference(value)
if value is not None:
instance._data[self.name] = self.document_type._from_son(value)
return super(ReferenceField, self).__get__(instance, owner)
def to_mongo(self, document):
if isinstance(document, DBRef):
if not self.dbref:
return document.id
return document
elif not self.dbref and isinstance(document, basestring):
return document
id_field_name = self.document_type._meta['id_field']
id_field = self.document_type._fields[id_field_name]
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.pk
if id_ is None:
self.error('You can only reference documents once they have'
' been saved to the database')
else:
id_ = document
id_ = id_field.to_mongo(id_)
if self.dbref:
collection = self.document_type._get_collection_name()
return DBRef(collection, id_)
return id_
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type.
"""
if (not self.dbref and
not isinstance(value, (DBRef, Document, EmbeddedDocument))):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value))
return value
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
def validate(self, value):
if not isinstance(value, (self.document_type, DBRef)):
self.error("A ReferenceField only accepts DBRef or documents")
if isinstance(value, Document) and value.id is None:
self.error('You can only reference documents once they have been '
'saved to the database')
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
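# Hedged usage sketch (``Company``/``Employee`` are hypothetical documents): the dbref
# flag handled above controls the stored form of the reference.
#
#     class Company(Document):
#         name = StringField()
#
#     class Employee(Document):
#         company = ReferenceField(Company, dbref=True)   # stored as a DBRef
#
#     # With dbref=False only the referenced document's id is stored, which is the
#     # default announced for 0.8 in the FutureWarning above.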
class GenericReferenceField(BaseField):
"""A reference to *any* :class:`~mongoengine.document.Document` subclass
that will be automatically dereferenced on access (lazily).
.. note ::
* Any documents used as a generic reference must be registered in the
document registry. Importing the model will automatically register
it.
* You can use the choices param to limit the acceptable Document types
.. versionadded:: 0.3
"""
def __get__(self, instance, owner):
if instance is None:
return self
value = instance._data.get(self.name)
if isinstance(value, (dict, SON)):
instance._data[self.name] = self.dereference(value)
return super(GenericReferenceField, self).__get__(instance, owner)
def validate(self, value):
if not isinstance(value, (Document, DBRef)):
self.error('GenericReferences can only contain documents')
# We need the id from the saved object to create the DBRef
if isinstance(value, Document) and value.id is None:
self.error('You can only reference documents once they have been'
' saved to the database')
def dereference(self, value):
doc_cls = get_document(value['_cls'])
reference = value['_ref']
doc = doc_cls._get_db().dereference(reference)
if doc is not None:
doc = doc_cls._from_son(doc)
return doc
def to_mongo(self, document):
if document is None:
return None
if isinstance(document, (dict, SON)):
return document
id_field_name = document.__class__._meta['id_field']
id_field = document.__class__._fields[id_field_name]
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.id
if id_ is None:
self.error('You can only reference documents once they have'
' been saved to the database')
else:
id_ = document
id_ = id_field.to_mongo(id_)
collection = document._get_collection_name()
ref = DBRef(collection, id_)
return {'_cls': document._class_name, '_ref': ref}
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
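# Hedged sketch of the stored form built by to_mongo() above for a generic reference
# to a hypothetical ``Company`` document:
#
#     {'_cls': 'Company', '_ref': DBRef('company', ObjectId('...'))}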
class BinaryField(BaseField):
"""A binary data field.
"""
def __init__(self, max_bytes=None, **kwargs):
self.max_bytes = max_bytes
super(BinaryField, self).__init__(**kwargs)
def __set__(self, instance, value):
"""Handle bytearrays in python 3.1"""
if PY3 and isinstance(value, bytearray):
value = bin_type(value)
return super(BinaryField, self).__set__(instance, value)
def to_mongo(self, value):
return Binary(value)
def validate(self, value):
if not isinstance(value, (bin_type, txt_type, Binary)):
self.error("BinaryField only accepts instances of "
"(%s, %s, Binary)" % (
bin_type.__name__, txt_type.__name__))
if self.max_bytes is not None and len(value) > self.max_bytes:
self.error('Binary value is too long')
class GridFSError(Exception):
pass
class GridFSProxy(object):
"""Proxy object to handle writing and reading of files to and from GridFS
.. versionadded:: 0.4
.. versionchanged:: 0.5 - added optional size param to read
.. versionchanged:: 0.6 - added collection name param
"""
_fs = None
def __init__(self, grid_id=None, key=None,
instance=None,
db_alias=DEFAULT_CONNECTION_NAME,
collection_name='fs'):
self.grid_id = grid_id # Store GridFS id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
'collection_name', 'newfile', 'gridout')
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if name in dir(obj):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __nonzero__(self):
return bool(self.grid_id)
def __getstate__(self):
self_dict = self.__dict__
self_dict['_fs'] = None
return self_dict
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
def __eq__(self, other):
if isinstance(other, GridFSProxy):
return ((self.grid_id == other.grid_id) and
(self.collection_name == other.collection_name) and
(self.db_alias == other.db_alias))
else:
return False
@property
def fs(self):
if not self._fs:
self._fs = gridfs.GridFS(get_db(self.db_alias), self.collection_name)
return self._fs
def get(self, id=None):
if id:
self.grid_id = id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = self.fs.get(self.grid_id)
return self.gridout
except:
# File has been deleted
return None
def new_file(self, **kwargs):
self.newfile = self.fs.new_file(**kwargs)
self.grid_id = self.newfile._id
def put(self, file_obj, **kwargs):
if self.grid_id:
raise GridFSError('This document already has a file. Either delete '
'it or call replace to overwrite it')
self.grid_id = self.fs.put(file_obj, **kwargs)
self._mark_as_changed()
def write(self, string):
if self.grid_id:
if not self.newfile:
raise GridFSError('This document already has a file. Either '
'delete it or call replace to overwrite it')
else:
self.new_file()
self.newfile.write(string)
def writelines(self, lines):
if not self.newfile:
self.new_file()
self.grid_id = self.newfile._id
self.newfile.writelines(lines)
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except:
return ""
def delete(self):
# Delete file from GridFS, FileField still remains
self.fs.delete(self.grid_id)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def replace(self, file_obj, **kwargs):
self.delete()
self.put(file_obj, **kwargs)
def close(self):
if self.newfile:
self.newfile.close()
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class FileField(BaseField):
"""A GridFS storage field.
.. versionadded:: 0.4
.. versionchanged:: 0.5 added optional size param for read
.. versionchanged:: 0.6 added db_alias for multidb support
"""
proxy_class = GridFSProxy
def __init__(self,
db_alias=DEFAULT_CONNECTION_NAME,
collection_name="fs", **kwargs):
super(FileField, self).__init__(**kwargs)
self.collection_name = collection_name
self.db_alias = db_alias
def __get__(self, instance, owner):
if instance is None:
return self
# Check if a file already exists for this model
grid_file = instance._data.get(self.name)
if not isinstance(grid_file, self.proxy_class):
grid_file = self.proxy_class(key=self.name, instance=instance,
db_alias=self.db_alias,
collection_name=self.collection_name)
instance._data[self.name] = grid_file
if not grid_file.key:
grid_file.key = self.name
grid_file.instance = instance
return grid_file
def __set__(self, instance, value):
key = self.name
if ((hasattr(value, 'read') and not
isinstance(value, GridFSProxy)) or isinstance(value, str_types)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except:
pass
# Create a new file with the new data
grid_file.put(value)
else:
# Create a new proxy object as we don't already have one
instance._data[key] = self.proxy_class(key=key, instance=instance,
collection_name=self.collection_name)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def to_mongo(self, value):
# Store the GridFS file id in MongoDB
if isinstance(value, self.proxy_class) and value.grid_id is not None:
return value.grid_id
return None
def to_python(self, value):
if value is not None:
return self.proxy_class(value,
collection_name=self.collection_name,
db_alias=self.db_alias)
def validate(self, value):
if value.grid_id is not None:
if not isinstance(value, self.proxy_class):
self.error('FileField only accepts GridFSProxy values')
if not isinstance(value.grid_id, ObjectId):
self.error('Invalid GridFSProxy value')
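# Hedged usage sketch of a typical FileField round-trip through GridFSProxy. ``Animal``
# and 'marmot.jpg' are illustrative names; a mongoengine connection is assumed.
#
#     class Animal(Document):
#         photo = FileField()
#
#     marmot = Animal()
#     with open('marmot.jpg', 'rb') as fd:
#         marmot.photo.put(fd, content_type='image/jpeg')
#     marmot.save()
#     data = marmot.photo.read()   # proxied to GridFSProxy.read()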
class ImageGridFsProxy(GridFSProxy):
"""
Proxy for ImageField
.. versionadded:: 0.6
"""
def put(self, file_obj, **kwargs):
"""
Insert an image into the database,
applying the field properties (size, thumbnail_size).
"""
field = self.instance._fields[self.key]
try:
img = Image.open(file_obj)
img_format = img.format
except:
raise ValidationError('Invalid image')
if (field.size and (img.size[0] > field.size['width'] or
img.size[1] > field.size['height'])):
size = field.size
if size['force']:
img = ImageOps.fit(img,
(size['width'],
size['height']),
Image.ANTIALIAS)
else:
img.thumbnail((size['width'],
size['height']),
Image.ANTIALIAS)
thumbnail = None
if field.thumbnail_size:
size = field.thumbnail_size
if size['force']:
thumbnail = ImageOps.fit(img,
(size['width'],
size['height']),
Image.ANTIALIAS)
else:
thumbnail = img.copy()
thumbnail.thumbnail((size['width'],
size['height']),
Image.ANTIALIAS)
if thumbnail:
thumb_id = self._put_thumbnail(thumbnail,
img_format)
else:
thumb_id = None
w, h = img.size
io = StringIO()
img.save(io, img_format)
io.seek(0)
return super(ImageGridFsProxy, self).put(io,
width=w,
height=h,
format=img_format,
thumbnail_id=thumb_id,
**kwargs)
def delete(self, *args, **kwargs):
#deletes thumbnail
out = self.get()
if out and out.thumbnail_id:
self.fs.delete(out.thumbnail_id)
return super(ImageGridFsProxy, self).delete(*args, **kwargs)
def _put_thumbnail(self, thumbnail, format, **kwargs):
w, h = thumbnail.size
io = StringIO()
thumbnail.save(io, format)
io.seek(0)
return self.fs.put(io, width=w,
height=h,
format=format,
**kwargs)
@property
def size(self):
"""
Return the (width, height) of the stored image.
"""
out = self.get()
if out:
return out.width, out.height
@property
def format(self):
"""
Return the format of the image,
e.g. PNG, JPEG, GIF, etc.
"""
out = self.get()
if out:
return out.format
@property
def thumbnail(self):
"""
Return a gridfs.grid_file.GridOut
representing the thumbnail of the image.
"""
out = self.get()
if out and out.thumbnail_id:
return self.fs.get(out.thumbnail_id)
def write(self, *args, **kwargs):
raise RuntimeError("Please use \"put\" method instead")
def writelines(self, *args, **kwargs):
raise RuntimeError("Please use \"put\" method instead")
class ImproperlyConfigured(Exception):
pass
class ImageField(FileField):
"""
An image file storage field.
@size (width, height, force):
maximum size for stored images; larger images are automatically resized
ex: size=(800, 600, True)
@thumbnail_size (width, height, force):
size of the thumbnail to generate
.. versionadded:: 0.6
"""
proxy_class = ImageGridFsProxy
def __init__(self, size=None, thumbnail_size=None,
collection_name='images', **kwargs):
if not Image:
raise ImproperlyConfigured("PIL library was not found")
params_size = ('width', 'height', 'force')
extra_args = dict(size=size, thumbnail_size=thumbnail_size)
for att_name, att in extra_args.items():
value = None
if isinstance(att, (tuple, list)):
if PY3:
value = dict(itertools.zip_longest(params_size, att,
fillvalue=None))
else:
value = dict(map(None, params_size, att))
setattr(self, att_name, value)
super(ImageField, self).__init__(
collection_name=collection_name,
**kwargs)
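# Hedged usage sketch: the size/thumbnail_size tuples are unpacked above into
# {'width': ..., 'height': ..., 'force': ...} dicts consumed by ImageGridFsProxy.put().
# ``Profile`` is a hypothetical document.
#
#     class Profile(Document):
#         avatar = ImageField(size=(800, 600, True),            # force-fit to 800x600
#                             thumbnail_size=(150, 150, True))  # also store a thumbnail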
class GeoPointField(BaseField):
"""A list storing a latitude and longitude.
.. versionadded:: 0.4
"""
_geo_index = True
def validate(self, value):
"""Make sure that a geo-value is of type (x, y)
"""
if not isinstance(value, (list, tuple)):
self.error('GeoPointField can only accept tuples or lists '
'of (x, y)')
if not len(value) == 2:
self.error('Value must be a two-dimensional point')
if (not isinstance(value[0], (float, int)) or
not isinstance(value[1], (float, int))):
self.error('Both values in point must be float or int')
class SequenceField(IntField):
"""Provides a sequental counter (see http://www.mongodb.org/display/DOCS/Object+IDs#ObjectIDs-SequenceNumbers)
.. note::
Although traditional databases often use increasing sequence
numbers for primary keys, in MongoDB the preferred approach is to
use Object IDs instead. The concept is that in a very large
cluster of machines, it is easier to create an object ID than have
global, uniformly increasing sequence numbers.
.. versionadded:: 0.5
"""
def __init__(self, collection_name=None, db_alias=None, sequence_name=None, *args, **kwargs):
self.collection_name = collection_name or 'mongoengine.counters'
self.db_alias = db_alias or DEFAULT_CONNECTION_NAME
self.sequence_name = sequence_name
super(SequenceField, self).__init__(*args, **kwargs)
def generate_new_value(self):
"""
Generate and Increment the counter
"""
sequence_name = self.get_sequence_name()
sequence_id = "%s.%s" % (sequence_name, self.name)
collection = get_db(alias=self.db_alias)[self.collection_name]
counter = collection.find_and_modify(query={"_id": sequence_id},
update={"$inc": {"next": 1}},
new=True,
upsert=True)
return counter['next']
def get_sequence_name(self):
if self.sequence_name:
return self.sequence_name
owner = self.owner_document
if issubclass(owner, Document):
return owner._get_collection_name()
else:
return ''.join('_%s' % c if c.isupper() else c
for c in owner._class_name).strip('_').lower()
def __get__(self, instance, owner):
if instance is None:
return self
if not instance._data:
return
value = instance._data.get(self.name)
if not value and instance._initialised:
value = self.generate_new_value()
instance._data[self.name] = value
instance._mark_as_changed(self.name)
return int(value) if value else None
def __set__(self, instance, value):
if value is None and instance._initialised:
value = self.generate_new_value()
return super(SequenceField, self).__set__(instance, value)
def to_python(self, value):
if value is None:
value = self.generate_new_value()
return value
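# Hedged usage sketch: each SequenceField keeps its counter in the
# 'mongoengine.counters' collection under the id '<sequence name>.<field name>', as
# built in generate_new_value() above. ``Ticket`` is illustrative only.
#
#     class Ticket(Document):
#         number = SequenceField()
#
#     t = Ticket()
#     t.save()
#     t.number   # 1; counter document: {'_id': 'ticket.number', 'next': 1}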
class UUIDField(BaseField):
"""A UUID field.
.. versionadded:: 0.6
"""
_binary = None
def __init__(self, binary=None, **kwargs):
"""
Store UUID data in the database
:param binary: (optional) if True, store the UUID as binary rather than as a string.
.. versionchanged:: 0.6.19
"""
if binary is None:
binary = False
msg = ("UUIDFields will soon default to store as binary, please "
"configure binary=False if you wish to store as a string")
warnings.warn(msg, FutureWarning)
self._binary = binary
super(UUIDField, self).__init__(**kwargs)
def to_python(self, value):
if not self._binary:
original_value = value
try:
if not isinstance(value, basestring):
value = unicode(value)
return uuid.UUID(value)
except:
return original_value
return value
def to_mongo(self, value):
if not self._binary:
return unicode(value)
return value
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
def validate(self, value):
if not isinstance(value, uuid.UUID):
if not isinstance(value, basestring):
value = str(value)
try:
value = uuid.UUID(value)
except Exception, exc:
self.error('Could not convert to UUID: %s' % exc)
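# Hedged usage sketch: with binary=False (the current default warned about above) the
# UUID is stored as a unicode string; binary=True stores the raw UUID value instead.
#
#     class Session(Document):
#         token = UUIDField(binary=False, default=uuid.uuid4)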
|
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import unittest2
import warnings
import httpretty as hp
from coinbase.wallet.client import Client
from coinbase.wallet.model import APIObject
from coinbase.wallet.model import new_api_object
from coinbase.wallet.model import Account
from coinbase.wallet.model import Sell
from coinbase.wallet.model import CurrentUser
from coinbase.wallet.model import Deposit
from coinbase.wallet.model import Checkout
from coinbase.wallet.model import Order
from coinbase.wallet.model import Withdrawal
from coinbase.wallet.model import Buy
from coinbase.wallet.model import Address
from coinbase.wallet.model import Transaction
from coinbase.wallet.model import Report
from tests.helpers import mock_response
# Hide all warning output.
warnings.showwarning = lambda *a, **k: None
# Dummy API key values for use in tests
api_key = 'fakeapikey'
api_secret = 'fakeapisecret'
client_id = 'fakeid'
client_secret = 'fakesecret'
access_token = 'fakeaccesstoken'
refresh_token = 'fakerefreshtoken'
mock_item = {'key1': 'val1', 'key2': 'val2'}
mock_item_updated = {
'key1': 'val1-modified',
'key2': 'val2-modified',
'key3': 'newkey',
}
mock_collection = [mock_item, mock_item]
class TestAPIObject(unittest2.TestCase):
@mock_response(hp.GET, '/resource/foo', mock_item_updated)
def test_refresh(self):
client = Client(api_key, api_secret)
obj = new_api_object(client, mock_item, APIObject)
self.assertEqual(obj, mock_item)
# Missing resource_path key results in ValueError
with self.assertRaises(ValueError):
obj.refresh()
obj.resource_path = '/resource/foo'
updated = obj.refresh()
self.assertEqual(updated, mock_item_updated)
# The updated version is returned, as well as being used to update the
# object that made the refresh() call
for key, value in six.iteritems(mock_item_updated):
self.assertEqual(obj[key], value)
# Keys not present originally will not be removed
self.assertEqual(obj.resource_path, '/resource/foo')
def test_dot_notation(self):
client = Client(api_key, api_secret)
obj = new_api_object(client, mock_item, APIObject)
with self.assertRaises(AttributeError):
obj.foo
mock_account = {
'id': 'foo',
'resource_path': '/v2/accounts/foo',
}
mock_account_updated = {
'id': 'foo',
'resource_path': '/v2/accounts/foo',
'newkey': 'present',
}
class TestAccount(unittest2.TestCase):
@mock_response(hp.POST, '/v2/accounts/foo/primary', mock_account_updated)
def test_set_primary(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
data = account.set_primary()
self.assertEqual(data, mock_account_updated)
for key, value in six.iteritems(mock_account_updated):
self.assertEqual(account[key], value)
@mock_response(hp.PUT, '/v2/accounts/foo', mock_account_updated)
def test_modify(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
data = account.modify(name='New Account Name')
self.assertEqual(data, mock_account_updated)
for key, value in six.iteritems(mock_account_updated):
self.assertEqual(account[key], value)
@mock_response(hp.DELETE, '/v2/accounts/foo', None)
def test_delete(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
data = account.delete()
self.assertIs(data, None)
@mock_response(hp.GET, '/v2/accounts/foo/addresses', mock_collection)
def test_get_addresses(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
addresses = account.get_addresses()
self.assertIsInstance(addresses, APIObject)
self.assertEqual(addresses.data, mock_collection)
for address in addresses.data:
self.assertIsInstance(address, Address)
@mock_response(hp.GET, '/v2/accounts/foo/addresses/bar', mock_item)
def test_get_address(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
address = account.get_address('bar')
self.assertIsInstance(address, Address)
self.assertEqual(address, mock_item)
@mock_response(hp.GET, '/v2/accounts/foo/addresses/bar/transactions', mock_collection)
def test_get_address_transactions(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
transactions = account.get_address_transactions('bar')
self.assertIsInstance(transactions, APIObject)
self.assertEqual(transactions.data, mock_collection)
for transaction in transactions.data:
self.assertIsInstance(transaction, Transaction)
@mock_response(hp.POST, '/v2/accounts/foo/addresses', mock_item)
def test_create_address(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
address = account.create_address()
self.assertIsInstance(address, Address)
self.assertEqual(address, mock_item)
@mock_response(hp.GET, '/v2/accounts/foo/transactions', mock_collection)
def test_get_transactions(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
transactions = account.get_transactions()
self.assertIsInstance(transactions, APIObject)
self.assertEqual(transactions.data, mock_collection)
for transaction in transactions.data:
self.assertIsInstance(transaction, Transaction)
@mock_response(hp.GET, '/v2/accounts/foo/transactions/bar', mock_item)
def test_get_transaction(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
transaction = account.get_transaction('bar')
self.assertIsInstance(transaction, Transaction)
self.assertEqual(transaction, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/transactions', mock_item)
def test_send_money(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
# Start with none of the required arguments, and slowly make requests with
# an additional required argument, expecting failure until all arguments
# are present.
send_kwargs = {}
required_kwargs = {'to': 'bar', 'amount': '1.0', 'currency': 'USD'}
while required_kwargs:
with self.assertRaises(ValueError):
transaction = account.send_money(**send_kwargs)
for key in required_kwargs:
send_kwargs[key] = required_kwargs.pop(key)
break
transaction = account.send_money(**send_kwargs)
self.assertIsInstance(transaction, Transaction)
self.assertEqual(transaction, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/transactions', mock_item)
def test_transfer_money(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
# Start with none of the required arguments, and slowly make requests with
# an additional required argument, expecting failure until all arguments
# are present.
send_kwargs = {}
required_kwargs = {'to': 'bar', 'amount': '1.0', 'currency': 'USD'}
while required_kwargs:
with self.assertRaises(ValueError):
transaction = account.transfer_money(**send_kwargs)
for key in required_kwargs:
send_kwargs[key] = required_kwargs.pop(key)
break
transaction = account.transfer_money(**send_kwargs)
self.assertIsInstance(transaction, Transaction)
self.assertEqual(transaction, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/transactions', mock_item)
def test_request_money(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
# Start with none of the required arguments, and slowly make requests with
# an additional required argument, expecting failure until all arguments
# are present.
send_kwargs = {}
required_kwargs = {'to': 'bar', 'amount': '1.0', 'currency': 'USD'}
while required_kwargs:
with self.assertRaises(ValueError):
transaction = account.request_money(**send_kwargs)
for key in required_kwargs:
send_kwargs[key] = required_kwargs.pop(key)
break
transaction = account.request_money(**send_kwargs)
self.assertIsInstance(transaction, Transaction)
self.assertEqual(transaction, mock_item)
@mock_response(hp.GET, '/v2/reports', mock_collection)
def test_get_reports(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
reports = account.get_reports()
self.assertIsInstance(reports, APIObject)
self.assertEqual(reports.data, mock_collection)
for report in reports.data:
self.assertIsInstance(report, Report)
@mock_response(hp.GET, '/v2/reports/testreportid', mock_item)
def test_get_report(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
report = account.get_report('testreportid')
self.assertIsInstance(report, Report)
self.assertEqual(report, mock_item)
@mock_response(hp.POST, '/v2/reports', mock_item)
def test_create_report(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
report = account.create_report(type='transactions', email='example@coinbase.com')
self.assertIsInstance(report, APIObject)
self.assertIsInstance(report, Report)
self.assertEqual(report, mock_item)
@mock_response(hp.GET, '/v2/accounts/foo/buys', mock_collection)
def test_get_buys(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
buys = account.get_buys()
self.assertIsInstance(buys, APIObject)
self.assertEqual(buys.data, mock_collection)
for buy in buys.data:
self.assertIsInstance(buy, Buy)
@mock_response(hp.GET, '/v2/accounts/foo/buys/bar', mock_item)
def test_get_buy(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
buy = account.get_buy('bar')
self.assertIsInstance(buy, Buy)
self.assertEqual(buy, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/buys', mock_item)
def test_buy(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
with self.assertRaises(ValueError):
account.buy()
kwargs_list = [
{'amount': '1.0', 'payment_method': 'bar', 'currency': 'USD'},
{'total': '1.0', 'payment_method': 'bar', 'currency': 'USD'}
]
for valid_kwargs in kwargs_list:
buy = account.buy(**valid_kwargs)
self.assertIsInstance(buy, Buy)
self.assertEqual(buy, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/buys/bar/commit', mock_item)
def test_commit_buy(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
buy = account.commit_buy('bar')
self.assertIsInstance(buy, Buy)
self.assertEqual(buy, mock_item)
@mock_response(hp.GET, '/v2/accounts/foo/sells', mock_collection)
def test_get_sells(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
sells = account.get_sells()
self.assertIsInstance(sells, APIObject)
self.assertEqual(sells.data, mock_collection)
for sell in sells.data:
self.assertIsInstance(sell, Sell)
@mock_response(hp.GET, '/v2/accounts/foo/sells/bar', mock_item)
def test_get_sell(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
sell = account.get_sell('bar')
self.assertIsInstance(sell, Sell)
self.assertEqual(sell, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/sells', mock_item)
def test_sell(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
with self.assertRaises(ValueError):
account.sell()
kwargs_list = [
{'amount': '1.0', 'currency': 'USD'},
{'total': '1.0', 'currency': 'USD'}
]
for valid_kwargs in kwargs_list:
sell = account.sell(**valid_kwargs)
self.assertIsInstance(sell, Sell)
self.assertEqual(sell, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/sells/bar/commit', mock_item)
def test_commit_sell(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
sell = account.commit_sell('bar')
self.assertIsInstance(sell, Sell)
self.assertEqual(sell, mock_item)
@mock_response(hp.GET, '/v2/accounts/foo/deposits', mock_collection)
def test_get_deposits(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
deposits = account.get_deposits()
self.assertIsInstance(deposits, APIObject)
self.assertEqual(deposits.data, mock_collection)
for deposit in deposits.data:
self.assertIsInstance(deposit, Deposit)
@mock_response(hp.GET, '/v2/accounts/foo/deposits/bar', mock_item)
def test_get_deposit(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
deposit = account.get_deposit('bar')
self.assertIsInstance(deposit, Deposit)
self.assertEqual(deposit, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/deposits', mock_item)
def test_deposit(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
# Start with none of the required arguments, and slowly make requests with
# an additional required argument, expecting failure until all arguments
# are present.
send_kwargs = {}
required_kwargs = {'payment_method': 'bar', 'amount': '1.0', 'currency': 'USD'}
while required_kwargs:
with self.assertRaises(ValueError):
account.deposit(**send_kwargs)
for key in required_kwargs:
send_kwargs[key] = required_kwargs.pop(key)
break
deposit = account.deposit(**send_kwargs)
self.assertIsInstance(deposit, Deposit)
self.assertEqual(deposit, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/deposits/bar/commit', mock_item)
def test_commit_deposit(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
deposit = account.commit_deposit('bar')
self.assertIsInstance(deposit, Deposit)
self.assertEqual(deposit, mock_item)
@mock_response(hp.GET, '/v2/accounts/foo/withdrawals', mock_collection)
def test_get_withdrawals(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
withdrawals = account.get_withdrawals()
self.assertIsInstance(withdrawals, APIObject)
self.assertEqual(withdrawals.data, mock_collection)
for withdrawal in withdrawals.data:
self.assertIsInstance(withdrawal, Withdrawal)
@mock_response(hp.GET, '/v2/accounts/foo/withdrawals/bar', mock_item)
def test_get_withdrawal(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
withdrawal = account.get_withdrawal('bar')
self.assertIsInstance(withdrawal, Withdrawal)
self.assertEqual(withdrawal, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/withdrawals', mock_item)
def test_withdraw(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
# Start with none of the required arguments, and slowly make requests with
# an additional required argument, expecting failure until all arguments
# are present.
send_kwargs = {}
required_kwargs = {'payment_method': 'bar', 'amount': '1.0', 'currency': 'USD'}
while required_kwargs:
with self.assertRaises(ValueError):
account.withdraw(**send_kwargs)
for key in required_kwargs:
send_kwargs[key] = required_kwargs.pop(key)
break
withdrawal = account.withdraw(**send_kwargs)
self.assertIsInstance(withdrawal, Withdrawal)
self.assertEqual(withdrawal, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/withdrawals/bar/commit', mock_item)
def test_commit_withdrawal(self):
client = Client(api_key, api_secret)
account = new_api_object(client, mock_account, Account)
withdrawal = account.commit_withdrawal('bar')
self.assertIsInstance(withdrawal, Withdrawal)
self.assertEqual(withdrawal, mock_item)
mock_checkout = {
'id': 'foo',
'resource_path': '/v2/checkouts/foo',
}
class TestCheckout(unittest2.TestCase):
@mock_response(hp.GET, '/v2/checkouts/foo/orders', mock_collection)
def test_get_orders(self):
client = Client(api_key, api_secret)
checkout = new_api_object(client, mock_checkout, Checkout)
orders = checkout.get_orders()
self.assertIsInstance(orders, APIObject)
self.assertEqual(orders.data, mock_collection)
for order in orders.data:
self.assertIsInstance(order, Order)
@mock_response(hp.POST, '/v2/checkouts/foo/orders', mock_item)
def test_create_order(self):
client = Client(api_key, api_secret)
checkout = new_api_object(client, mock_checkout, Checkout)
order = checkout.create_order()
self.assertIsInstance(order, Order)
self.assertEqual(order, mock_item)
mock_order = {
'id': 'foo',
'resource_path': '/v2/orders/foo',
}
class TestOrder(unittest2.TestCase):
@mock_response(hp.POST, '/v2/orders/foo/refund', mock_item)
def test_refund(self):
client = Client(api_key, api_secret)
order = new_api_object(client, mock_order, Order)
# Start with none of the required arguments, and slowly make requests with
# an additional required argument, expecting failure until all arguments
# are present.
send_kwargs = {}
required_kwargs = {'currency': 'USD'}
while required_kwargs:
with self.assertRaises(ValueError):
order.refund(**send_kwargs)
for key in required_kwargs:
send_kwargs[key] = required_kwargs.pop(key)
break
order = order.refund(**send_kwargs)
self.assertIsInstance(order, Order)
self.assertEqual(order, mock_item)
mock_transaction = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/transactions/bar',
}
class TestTransaction(unittest2.TestCase):
@mock_response(hp.POST, '/v2/accounts/foo/transactions/bar/complete', mock_item)
def test_complete(self):
client = Client(api_key, api_secret)
transaction = new_api_object(client, mock_transaction, Transaction)
response = transaction.complete()
self.assertIsInstance(response, APIObject)
self.assertEqual(response, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/transactions/bar/resend', mock_item)
def test_resend(self):
client = Client(api_key, api_secret)
transaction = new_api_object(client, mock_transaction, Transaction)
response = transaction.resend()
self.assertIsInstance(response, APIObject)
self.assertEqual(response, mock_item)
@mock_response(hp.POST, '/v2/accounts/foo/transactions/bar/cancel', mock_item)
def test_cancel(self):
client = Client(api_key, api_secret)
transaction = new_api_object(client, mock_transaction, Transaction)
response = transaction.cancel()
self.assertIsInstance(response, APIObject)
self.assertEqual(response, mock_item)
mock_buy = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/buys/bar',
}
mock_buy_updated = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/buys/bar',
'updated': True,
}
class TestBuy(unittest2.TestCase):
@mock_response(hp.POST, '/v2/accounts/foo/buys/bar/commit', mock_buy_updated)
def test_commit(self):
client = Client(api_key, api_secret)
buy = new_api_object(client, mock_buy, Buy)
buy2 = buy.commit()
self.assertIsInstance(buy2, Buy)
self.assertEqual(buy2, mock_buy_updated)
for key, value in six.iteritems(mock_buy_updated):
self.assertEqual(buy[key], value)
mock_sell = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/sells/bar',
}
mock_sell_updated = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/sells/bar',
'updated': True,
}
class TestSell(unittest2.TestCase):
@mock_response(hp.POST, '/v2/accounts/foo/sells/bar/commit', mock_sell_updated)
def test_commit(self):
client = Client(api_key, api_secret)
sell = new_api_object(client, mock_sell, Sell)
sell2 = sell.commit()
self.assertIsInstance(sell2, Sell)
self.assertEqual(sell2, mock_sell_updated)
for key, value in six.iteritems(mock_sell_updated):
self.assertEqual(sell[key], value)
mock_deposit = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/deposits/bar',
}
mock_deposit_updated = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/deposits/bar',
'updated': True,
}
class TestDeposit(unittest2.TestCase):
@mock_response(hp.POST, '/v2/accounts/foo/deposits/bar/commit', mock_deposit_updated)
def test_commit(self):
client = Client(api_key, api_secret)
deposit = new_api_object(client, mock_deposit, Deposit)
deposit2 = deposit.commit()
self.assertIsInstance(deposit2, Deposit)
self.assertEqual(deposit2, mock_deposit_updated)
for key, value in six.iteritems(mock_deposit_updated):
self.assertEqual(deposit[key], value)
mock_withdrawal = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/withdrawals/bar',
}
mock_withdrawal_updated = {
'id': 'bar',
'resource_path': '/v2/accounts/foo/withdrawals/bar',
'updated': True,
}
class TestWithdrawal(unittest2.TestCase):
@mock_response(hp.POST, '/v2/accounts/foo/withdrawals/bar/commit', mock_withdrawal_updated)
def test_commit(self):
client = Client(api_key, api_secret)
withdrawal = new_api_object(client, mock_withdrawal, Withdrawal)
withdrawal2 = withdrawal.commit()
self.assertIsInstance(withdrawal2, Withdrawal)
self.assertEqual(withdrawal2, mock_withdrawal_updated)
for key, value in six.iteritems(mock_withdrawal_updated):
self.assertEqual(withdrawal[key], value)
class TestCurrentUser(unittest2.TestCase):
@mock_response(hp.PUT, '/v2/user', mock_item_updated)
def test_modify(self):
client = Client(api_key, api_secret)
user = new_api_object(client, mock_item, CurrentUser)
user2 = user.modify(name='New Name')
self.assertIsInstance(user2, CurrentUser)
self.assertEqual(user2, mock_item_updated)
for key, value in six.iteritems(mock_item_updated):
self.assertEqual(user[key], value)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 17 15:08:14 2014
@author: bottero
Script to watch the results of a run.
TODO: load prior profiles as well
"""
### --- MODULES AND PACKAGES --- ###
import os, sys
import argparse # To deal with arguments :
# https://docs.python.org/2/library/argparse.html
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import glob # Unix style pathname pattern expansion
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt # Matplotlib's pyplot: MATLAB-like syntax
def pdense(x, y, sigma, M=1000):
""" Plot probability density of y with known stddev sigma
"""
assert len(x) == len(y) and len(x) == len(sigma)
N = len(x)
# TODO: better y ranging
ymin, ymax = min(y - 2 * sigma), max(y + 2 * sigma)
yy = np.linspace(ymin, ymax, M)
a = [np.exp(-((Y - yy) / s) ** 2) / s for Y, s in zip(y, sigma)]
A = np.array(a)
A = A.reshape(N, M)
plt.imshow(-A, cmap='gray', aspect='auto',
origin='upper', extent=(ymin,ymax,max(x),min(x)))
# plt.title('Density plot')
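# Hedged usage sketch for pdense(): x holds the depths (drawn on the vertical axis via
# the extent above), y the mean profile and sigma its standard deviation. The numbers
# below are made up purely for illustration.
#
#     z = np.linspace(0, 100, 50)
#     vp = 1500 + 10 * z
#     pdense(z, vp, sigma=50 * np.ones_like(z), M=500)
#     plt.show()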
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def representsFloat(s):
try:
float(s)
return True
except ValueError:
return False
### --- Parser arguments --- ###
parser = argparse.ArgumentParser(description='Watch the results of a run')
parser.add_argument("pathToDir",
help="Path to result directory (ex : OUTPUT_FILES/XXX)")
parser.add_argument("-v","--verbose", help="Increase output verbosity",
action="store_true")
parser.add_argument("-a","--all", help="Plot everything",
action="store_true", default=False)
parser.add_argument("-d","--data", help="Plot the first guess curve (and real curves for analytical runs))",
action="store_true")
parser.add_argument("-g","--geometry", help="Plot the geometry used (sources and receivers)",
action="store_true")
parser.add_argument("-e","--energies", help="Plot the energies of the chains",
action="store_true")
parser.add_argument("-b","--best", help="Plot the some good models found and the residuals of the best",
action="store_true")
parser.add_argument("--dont_show_guess", help="Don't show first guess model on plots",
action="store_true", default=False)
parser.add_argument("--show_ranges", help="Show exploration ranges on plots",
action="store_true", default=False)
parser.add_argument("--show_averages", help="Show average models on plots",
action="store_true", default=False)
parser.add_argument("--no_density_plots", help="Represent uncertainties by a range",
action="store_true", default=False)
parser.add_argument("-r","--results", help="Plot the results from the inversion",
action="store_true", default=False)
parser.add_argument("--resultsChain",type=int,
help="Plot the results from the inversion for given \
chain number",default=-1)
parser.add_argument("-t","--treshold",type=int,
help="Iteration after which we show the model calculated",
default=0)
parser.add_argument("--recalculate_t0", help="Force recalculate t0 (even if it had not been chosen during the run)",
action="store_true")
parser.add_argument("-s","--swaps", help="Plot the swaps",
action="store_true")
parser.add_argument("--vpvs", help="Plot mean Vp/Vs ratio plus uncertainties",
action="store_true")
parser.add_argument("--filtering", help="Show optimum filtering",
action="store_true")
parser.add_argument("--paper", help="Plot the figures for the paper",
action="store_true")
args = parser.parse_args()
### --- Test arguments --- ###
if not os.path.isdir(args.pathToDir): # If the path does not exist
print "Directory ",args.pathToDir," not found."
parser.print_help()
sys.exit(0)
if args.pathToDir[-1:] != '/': # add a / at the end of the path if necessary
args.pathToDir+='/'
code = glob.glob1(args.pathToDir,"stats0.*")[0].split('.')[1]
#args.pathToDir.split("/")[-2] # just keep the name of the directory
if not representsInt(code):
print "Directory ",args.pathToDir," does not seem to be a correct IMCMC \
directory... (no stats0 found)"
sys.exit(0)
if not (args.all or args.data or args.geometry or args.energies or args.results or args.best or (args.resultsChain != -1) or args.filtering or args.swaps or args.vpvs or args.paper):
print "Nothing has to be done!"
sys.exit(0)
### --- Load files --- ###
# Extract information from config.cfg and config.XXX.dat :
# TODO : read the cfg file
T = np.zeros(0)
with open(args.pathToDir+'config.cfg') as configFile:
for line in configFile:
if line.split('#')[0].strip(): # If the line is not a comment
if 'SWAVES' in line:
if line.split(" = ")[1].split("#")[0].strip() == "1":
swaves=True
else:
swaves=False
if 'ANALYTICAL_RUN' in line:
if line.split(" = ")[1].split("#")[0].strip() == "1":
analytical=True
else:
analytical=False
if 'RECALCULATE_T0' in line:
if line.split(" = ")[1].split("#")[0].strip() == "1":
recalculate_t0=True
else:
recalculate_t0=False
if 'QP =' in line:
qp = line.split(" = ")[1].split("#")[0].strip()
if 'NAME_OF_FIRST_GUESS_P_FILE' in line:
nameOfFirstGuessP = line.split(" = ")[1].split("#")[0].strip()
if 'NAME_OF_FIRST_GUESS_S_FILE' in line:
nameOfFirstGuessS = line.split(" = ")[1].split("#")[0].strip()
if 'NAME_OF_REAL_PROFILE_FILE_P' in line:
nameOfrealP = line.split(" = ")[1].split("#")[0].strip()
if 'NAME_OF_REAL_PROFILE_FILE_S' in line:
nameOfrealS = line.split(" = ")[1].split("#")[0].strip()
if 'N_PRIOR_PROFILES' in line:
nPriorProfiles = line.split(" = ")[1].split("#")[0].strip()
if representsInt(nPriorProfiles): # Verify that the string extracted is an int
nPriorProfiles = int(nPriorProfiles)
if 'NAME_OF_STATIONS_FILE' in line:
nameOfStationsFile = line.split(" = ")[1].split("#")[0].strip()
if 'NAME_OF_SHOTS_FILE' in line:
nameOfShotsFile = line.split(" = ")[1].split("#")[0].strip()
if 'NAME_OF_TIMES_FILE =' in line:
nameOfTimesFile = line.split(" = ")[1].split("#")[0].strip()
if 'SIGMAP =' in line:
sigmaP = line.split(" = ")[1].split("#")[0].strip()
                if representsFloat(sigmaP): # Verify that the extracted string is a float
sigmaP = float(sigmaP)
if 'SIGMAS =' in line:
sigmaS = line.split(" = ")[1].split("#")[0].strip()
                if representsFloat(sigmaS): # Verify that the extracted string is a float
sigmaS = float(sigmaS)
if 'COORD_TOL =' in line:
coord_tol = line.split(" = ")[1].split("#")[0].strip()
                if representsFloat(coord_tol): # Verify that the extracted string is a float
coord_tol = float(coord_tol)
with open(args.pathToDir+'config.'+code+'.dat') as outConfigFile:
for line in outConfigFile:
if 'Energy of the prior : ' in line:
ep=line.split(":")[1].strip("\n").strip()
            if representsFloat(ep): # Verify that the extracted string is a float
ep = float(ep)
if 'temperatures : ' in line:
# Extract the number of temperatures from the line :
nbt=(line.split(":")[-1]).split("\n")[0].strip()
            if representsInt(nbt): # Verify that the extracted string is an int
nbt = int(nbt)
T=np.resize(T,nbt)
if 'Temperature max : ' in line:
# Extract temperature max from the line :
tmax=(line.split(":")[-1]).split("\n")[0].strip()
            if representsInt(tmax): # Verify that the extracted string is an int
tmax = int(tmax)
if 'Temperature ladder : ' in line: # Extract temperature ladder from line
T[nbt-1]=line.split("T["+str(nbt-1)+"] = ")[1].strip("\n").strip()
for i in np.arange(nbt-1):
T[i]=line.split("T["+str(i)+"] = ")[1].split("T["+str(i+1)+"] = ")[0].strip()
if args.verbose:
print "Watching the results of run : ",code,"..."
if analytical:
print "This is an analytical run"
if swaves:
print "S waves arrival times were calculated"
print "There are ",nbt," temperatures (tmax =",tmax,") :",
for i in np.arange(nbt):
print " T[",i,"] = ",T[i],
print
print "Loading files ..."
### --- Load files --- ###
# Declare lists (one entry per temperature chain) that will hold the loaded profiles:
M=1000 # Resolution parameter passed to pdense for the density plots
averagesP=[0]*nbt
averagesS=[0]*nbt
chains=[0]*nbt
varPs=[0]*nbt
varSs=[0]*nbt
qInfPs=[0]*nbt
qSupPs=[0]*nbt
qInfSs=[0]*nbt
qSupSs=[0]*nbt
minP=[0]*nbt
minS=[0]*nbt
maxP=[0]*nbt
maxS=[0]*nbt
priorP=[0]*nPriorProfiles
priorS=[0]*nPriorProfiles
timesData=[0]
nFiltered=len(glob.glob1(args.pathToDir,"filteredFirstGuessP.*"))
filteredPcurve=[0]*nFiltered
waveletFiltered=[""]*nFiltered
filteredScurve=[0]*nFiltered
# Loop on temperature chains
for i in np.arange(nbt):
averagesP[i]=np.loadtxt(args.pathToDir+"averageP"+str(i)+"."+code+".dat")
chains[i]=np.loadtxt(args.pathToDir+"chain"+str(i)+"."+code+".dat")
varPs[i]=np.loadtxt(args.pathToDir+"varP"+str(i)+"."+code+".dat")
qSupPs[i]=np.loadtxt(args.pathToDir+"qSupP"+str(i)+"."+code+".dat")
qInfPs[i]=np.loadtxt(args.pathToDir+"qInfP"+str(i)+"."+code+".dat")
minP[i]=np.loadtxt(args.pathToDir+"minP."+str(i)+"."+code+".dat")
maxP[i]=np.loadtxt(args.pathToDir+"maxP."+str(i)+"."+code+".dat")
if swaves:
averagesS[i]=np.loadtxt(args.pathToDir+"averageS"+str(i)+"."+code+".dat")
varSs[i]=np.loadtxt(args.pathToDir+"varS"+str(i)+"."+code+".dat")
qSupSs[i]=np.loadtxt(args.pathToDir+"qSupS"+str(i)+"."+code+".dat")
qInfSs[i]=np.loadtxt(args.pathToDir+"qInfS"+str(i)+"."+code+".dat")
minS[i]=np.loadtxt(args.pathToDir+"minS."+str(i)+"."+code+".dat")
maxS[i]=np.loadtxt(args.pathToDir+"maxS."+str(i)+"."+code+".dat")
for i,filteredGuessPCurve in enumerate(glob.glob1(args.pathToDir,"filteredFirstGuessP.*")):
waveletFiltered[i]=filteredGuessPCurve.split('.')[1]
filteredPcurve[i] = np.loadtxt(args.pathToDir+filteredGuessPCurve)
for i,filteredGuessSCurve in enumerate(glob.glob1(args.pathToDir,"filteredFirstGuessS.*")):
filteredScurve[i] = np.loadtxt(args.pathToDir+filteredGuessSCurve)
globalAverageP=np.loadtxt(args.pathToDir+"globalAverageP."+code+".dat")
globalVarP=np.loadtxt(args.pathToDir+"globalVarP."+code+".dat")
globalMaxP=np.loadtxt(args.pathToDir+"maxP."+code+".dat") #TODO don't forget them
globalMinP=np.loadtxt(args.pathToDir+"minP."+code+".dat")
if swaves:
globalMaxS=np.loadtxt(args.pathToDir+"maxS."+code+".dat")
globalMinS=np.loadtxt(args.pathToDir+"minS."+code+".dat")
globalAverageS=np.loadtxt(args.pathToDir+"globalAverageS."+code+".dat")
globalVarS=np.loadtxt(args.pathToDir+"globalVarS."+code+".dat")
globalVarVpVs=np.loadtxt(args.pathToDir+"globalVarVpVs."+code+".dat")
coordShots=np.loadtxt(args.pathToDir+nameOfShotsFile)
coordStats=np.loadtxt(args.pathToDir+nameOfStationsFile)
firstGuessP=np.loadtxt(args.pathToDir+nameOfFirstGuessP)
firstGuessS=np.loadtxt(args.pathToDir+nameOfFirstGuessS)
if analytical:
realP=np.loadtxt(args.pathToDir+nameOfrealP)
realS=np.loadtxt(args.pathToDir+nameOfrealS)
timesData=np.loadtxt(args.pathToDir+"calculatedTimes."+code+".dat")
else:
timesData=np.loadtxt(args.pathToDir+nameOfTimesFile)
if os.path.isfile(args.pathToDir+"bestModelTimes."+code+".dat"):
bestModelCalculated=True
bestModelTimes=np.loadtxt(args.pathToDir+"bestModelTimes."+code+".dat")
else:
bestModelCalculated=False
ll=np.loadtxt(args.pathToDir+"ll."+code+".dat")
if nPriorProfiles > 0:
for i in np.arange(nPriorProfiles):
priorP[i]=np.loadtxt(args.pathToDir+"priorProfiles"+code+"/priorProfileP."+code+"."+str(i)+".dat")
priorS[i]=np.loadtxt(args.pathToDir+"priorProfiles"+code+"/priorProfileS."+code+"."+str(i)+".dat")
nit=len(chains[0])
if args.verbose:
print "Loading done !"
print
print "During this simulation the",nbt,"chains have run during",nit,"steps"
if nit < 50:
print "Take care!! Below 50 iterations min and max profiles don't make sense!"
### --- Analyses --- ###
xmin=-1e99
ymin=-1e99
zmin=-1e99
xmax=1e99
ymax=1e99
zmax=1e99
if np.size(coordShots) > 3:
xmin = coordShots[:,0].min()
ymin = coordShots[:,1].min()
zmin = coordShots[:,2].min()
xmax = coordShots[:,0].max()
ymax = coordShots[:,1].max()
zmax = coordShots[:,2].max()
else:
xmin = coordShots[0]
ymin = coordShots[1]
zmin = coordShots[2]
xmax = coordShots[0]
ymax = coordShots[1]
zmax = coordShots[2]
if np.size(coordStats) > 3:
xmin = min(xmin,coordStats[:,0].min())
ymin = min(ymin,coordStats[:,1].min())
zmin = min(zmin,coordStats[:,2].min())
xmax = max(xmax,coordStats[:,0].max())
ymax = max(ymax,coordStats[:,1].max())
zmax = max(zmax,coordStats[:,2].max())
else:
xmin = min(xmin,coordStats[0])
ymin = min(ymin,coordStats[1])
zmin = min(zmin,coordStats[2])
xmax = max(xmax,coordStats[0])
ymax = max(ymax,coordStats[1])
zmax = max(zmax,coordStats[2])
xmin2 = xmin - (xmax-xmin)*coord_tol;
ymin2 = ymin - (ymax-ymin)*coord_tol;
xmax2 = xmax + (xmax-xmin)*coord_tol;
ymax2 = ymax + (ymax-ymin)*coord_tol;
xmin = xmin2
ymin = ymin2
xmax = xmax2
ymax = ymax2
z=firstGuessP[:,0]
zFilt=varPs[0][:,0]
nStats=coordStats.size/3
nShots=coordShots.size/3
plt.close('all')
if args.all:
args.geometry=True
args.energies=True
args.best=True
args.data=True
if swaves:
args.vpvs=True
# args.swaps=True
if args.energies:
plt.hold(True)
ii=0
nBest=len(glob.glob1(args.pathToDir,"bestPprofile*"))
E=[0]*nBest
iterationBest=[0]*nBest
chain=[0]*nBest
for bestModel in glob.glob1(args.pathToDir,"bestSprofile*"):
ii=ii+1
iterationBest[ii-1]=int(bestModel.split("idx")[1].split(".")[0])
chain[ii-1]=int(bestModel.split("chain")[1].split(".")[0])
E[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
# TODO : this does not work if the energy value contains the code of the run : ex code=745 energy=745.23
if args.verbose:
print "Models kept after iteration : "+str(args.treshold)+" will be shown"
from operator import itemgetter
idxBest=0
itBestE=0
chainBest=0
if E:
idxBest=min(enumerate(E), key=itemgetter(1))[0] # index of best model
itBestE=iterationBest[idxBest]
chainBest=chain[idxBest]
chain=[chain[i] for i in np.arange(nBest) if iterationBest[i] > args.treshold]
iterationBest=[i for i in iterationBest if i>args.treshold]
iteration=np.arange(nit)
for i in np.arange(nbt):
if len(iteration) == len(chains[i][:,-1]):
plt.semilogy(iteration,chains[i][:,-1]*T[i])
else:
miniIteration = min(len(iteration),len(chains[i][:,-1]))
plt.semilogy(iteration[:miniIteration],chains[i][:miniIteration,-1]*T[i])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
flagForLabel=True
for j in np.arange(len(iterationBest)):
if flagForLabel:
plt.semilogy(iteration[iterationBest[j]], T[chain[j]]*chains[chain[j]][:,-1][iterationBest[j]], 'bD', label="Best models saved")
flagForLabel=False
else:
plt.semilogy(iteration[iterationBest[j]], T[chain[j]]*chains[chain[j]][:,-1][iterationBest[j]], 'bD')
plt.semilogy(itBestE, T[chainBest]*chains[chainBest][:,-1][itBestE], 'rD', label="Best model")
if recalculate_t0 is True:
if swaves:
            plt.semilogy(iteration,np.zeros(nit)+nStats*nShots+ep,'b--',linewidth=2,label=r'Below this line every model is acceptable ($1\sigma$ misfit for each measurement)')
else:
plt.semilogy(iteration,np.zeros(nit)+nStats*nShots/2.0+ep,'b--',linewidth=2)
plt.legend(numpoints=1)
plt.semilogy(iteration,np.zeros(nit)+ep)
plt.xlim(xmax=iteration.max())
plt.rc('font', family='serif')
plt.xlabel('Iteration number',fontsize='14')
plt.ylabel('Energy',fontsize='14')
if args.geometry:
fig = plt.figure()
ax = fig.gca(projection='3d') #Axes3D(fig)
ax.hold(True)
ax.scatter(coordStats[:,0],coordStats[:,1],coordStats[:,2],zdir='z',s=20,c='b')
if (coordShots.size>3):
ax.scatter(coordShots[:,0],coordShots[:,1],coordShots[:,2],zdir='z',s=20,c='r',marker='^')
else:
ax.scatter(coordShots[0],coordShots[1],coordShots[2],zdir='z',s=200,c='r',marker='^')
ax.set_xlim3d(xmin,xmax)
ax.set_ylim3d(ymin,ymax)
ax.set_zlim3d(zmin,zmax)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.set_title('Geometry')
ax.invert_zaxis()
if args.data:
fig2 = plt.figure()
plt.hold(True)
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95))
if (swaves):
plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5))
if analytical:
plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4)
if swaves:
plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'Wave speed ($m.s^{-1}$)',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
if swaves:
plt.title(r'First guess velocity profiles',fontsize='14')
else:
plt.title(r'First guess velocity profile',fontsize='14')
plt.ylim(ymax=z.max())
plt.gca().invert_yaxis()
if nPriorProfiles > 0:
plt.figure()
plt.hold(True)
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95))
if analytical:
plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4)
for i in np.arange(nPriorProfiles):
plt.plot(priorP[i][:,1],z)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'P wave velocity profiles from prior ($m.s^{-1}$)',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
plt.ylim(ymin=z.max())
plt.ylim(ymax=z.min())
if swaves:
plt.figure()
plt.hold(True)
plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5))
if analytical:
plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4)
for i in np.arange(nPriorProfiles):
plt.plot(priorS[i][:,1],z)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'S wave velocity profiles from prior ($m.s^{-1}$)',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
plt.ylim(ymin=z.max())
plt.ylim(ymax=z.min())
if args.all:
chainsToPlot=np.arange(nbt)
elif args.resultsChain >= 0:
if args.resultsChain > nbt-1:
print "There were just ",nbt," chains running!"
print "-> maximum index : ",nbt-1
sys.exit(0)
chainsToPlot=np.array([args.resultsChain])
if (args.resultsChain >= 0) or args.all:
lb=qp+"\% confidence interval"
maxiP=globalMaxP[:,1].max()
miniP=globalMinP[:,1].min()
maxiS=globalMaxS[:,1].max()
miniS=globalMinS[:,1].min()
dp=(maxiP-miniP)/10
ds=(maxiS-miniS)/10
for i in chainsToPlot:
plt.figure()
plt.hold(True)
if args.show_ranges:
plt.plot(maxP[i][:,1],zFilt,color=(0.4,0.8,0.8),label="Range investigated by chain "+str(i))
plt.plot(minP[i][:,1],zFilt,color=(0.4,0.8,0.8))
if not args.dont_show_guess:
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95),linewidth=4,label="First guess velocity profile")
if analytical:
plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4,label="Real velocity profile")
plt.plot(averagesP[i][:,1],zFilt,color=(0.5,0.5,0),linewidth=4,label="Average profile")
if not args.no_density_plots:
#pdense(zFilt,averagesP[i][:,1],np.sqrt(varPs[i][:,1]),M)
pdense(zFilt,(qSupPs[i][:,1]+qInfPs[i][:,1])/2,(qSupPs[i][:,1]-qInfPs[i][:,1])/2,M)
else:
#plt.plot(averagesP[i][:,1]+np.sqrt(varPs[i][:,1]),zFilt,color=(0.5,0.5,0),label="Standard deviation")
#plt.plot(averagesP[i][:,1]-np.sqrt(varPs[i][:,1]),zFilt,color=(0.5,0.5,0))
plt.plot(qSupPs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7),label=lb)
plt.plot(qInfPs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7))
if args.show_averages:
plt.plot(averagesP[i][:,1],zFilt,label="Average model")
if args.show_ranges:
plt.plot(globalMaxP[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
plt.plot(globalMinP[:,1],zFilt,color=(1,0,0))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.title(r'Chain '+str(i),fontsize='14')
plt.xlabel(r'P waves velocity ($m.s^{-1}$)',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
plt.xlim(miniP-dp, maxiP+dp)
plt.legend()
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
if swaves:
plt.figure()
if args.show_ranges:
plt.plot(maxS[i][:,1],zFilt,color=(0.4,0.8,0.8),label="Range investigated by chain "+str(i))
plt.plot(minS[i][:,1],zFilt,color=(0.4,0.8,0.8))
if not args.dont_show_guess:
plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5),linewidth=4,label="First guess velocity profile")
if analytical:
plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4,label="Real velocity profile")
plt.plot(averagesS[i][:,1],zFilt,color=(0.5,0.5,0),linewidth=4,label="Average profile")
if not args.no_density_plots:
#pdense(zFilt,averagesS[i][:,1],np.sqrt(varSs[i][:,1]),M)
pdense(zFilt,(qSupSs[i][:,1]+qInfSs[i][:,1])/2,(qSupSs[i][:,1]-qInfSs[i][:,1])/2,M)
else:
#plt.plot(averagesS[i][:,1]+np.sqrt(varSs[i][:,1]),zFilt,color=(0.5,0.5,0),label="Standard deviation")
#plt.plot(averagesS[i][:,1]-np.sqrt(varSs[i][:,1]),zFilt,color=(0.5,0.5,0))
plt.plot(qSupSs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7),label=lb)
plt.plot(qInfSs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7))
if args.show_averages:
plt.plot(averagesS[i][:,1],zFilt,label="Average model")
if args.show_ranges:
plt.plot(globalMaxS[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
plt.plot(globalMinS[:,1],zFilt,color=(1,0,0))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.title(r'Chain '+str(i),fontsize='14')
plt.xlabel(r'S waves velocity ($m.s^{-1}$)',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
plt.xlim(miniS-ds, maxiS+ds)
plt.legend()
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
if args.results:
maxiP=globalMaxP[:,1].max()
miniP=globalMinP[:,1].min()
maxiS=globalMaxS[:,1].max()
miniS=globalMinS[:,1].min()
dp=(maxiP-miniP)/10
ds=(maxiS-miniS)/10
plt.figure()
plt.hold(True)
if not args.dont_show_guess:
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95),linewidth=4,label="First guess velocity profile")
plt.plot(globalAverageP[:,1],zFilt,color=(0.5,0.5,0),linewidth=4,label="Global average")
if analytical:
plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4,label="Real velocity profile")
if not args.no_density_plots:
pdense(zFilt,globalAverageP[:,1],np.sqrt(globalVarP[:,1]),M)
else:
plt.plot(globalAverageP[:,1]+np.sqrt(globalVarP[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
plt.plot(globalAverageP[:,1]-np.sqrt(globalVarP[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7))
if args.show_ranges:
plt.plot(globalMaxP[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
plt.plot(globalMinP[:,1],zFilt,color=(1,0,0))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.title('Global results',fontsize=18)
plt.xlabel('P waves velocity (m.s$^{-1}$)',fontsize=18)
plt.ylabel('Depth (m)',fontsize=18)
plt.xlim(miniP-dp, maxiP+dp)
plt.legend()
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
if swaves:
plt.figure()
if not args.dont_show_guess:
plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5),linewidth=4,label="First guess velocity profile")
plt.plot(globalAverageS[:,1],zFilt,color=(0.5,0.5,0),linewidth=4,label="Global average")
if analytical:
plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4,label="Real velocity profile")
if not args.no_density_plots:
pdense(zFilt,globalAverageS[:,1],np.sqrt(globalVarS[:,1]),M)
else:
plt.plot(globalAverageS[:,1]+np.sqrt(globalVarS[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
plt.plot(globalAverageS[:,1]-np.sqrt(globalVarS[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7))
if args.show_ranges:
plt.plot(globalMaxS[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
plt.plot(globalMinS[:,1],zFilt,color=(1,0,0))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.title('Global results',fontsize=18)
plt.xlabel('S waves velocity (m.s$^{-1}$)',fontsize=18)
plt.ylabel('Depth (m)',fontsize=18)
plt.xlim(miniS-ds, maxiS+ds)
plt.legend()
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
if args.best:
if bestModelCalculated:
diffDataBestModel=bestModelTimes-timesData
if recalculate_t0 or args.recalculate_t0:
for i in np.arange(nShots):
diffPshoti=diffDataBestModel[i*nStats:(i+1)*nStats,0][timesData[i*nStats:(i+1)*nStats,0]>0]
t0ShotsPi=diffPshoti.mean()
if args.verbose:
print "t0P[",i,"] = ",t0ShotsPi
diffDataBestModel[i*nStats:(i+1)*nStats,0]=diffDataBestModel[i*nStats:(i+1)*nStats,0]-t0ShotsPi
if swaves:
diffSshoti=diffDataBestModel[i*nStats:(i+1)*nStats,1][timesData[i*nStats:(i+1)*nStats,1]>0]
t0ShotsSi=diffSshoti.mean()
if args.verbose:
print "t0S[",i,"] = ",t0ShotsSi
diffDataBestModel[i*nStats:(i+1)*nStats,1]=diffDataBestModel[i*nStats:(i+1)*nStats,1]-t0ShotsSi
diffP=diffDataBestModel[:,0][timesData[:,0]>0]
diffS=diffDataBestModel[:,1][timesData[:,1]>0]
fig = plt.figure()
plt.hold(True)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))+sigmaP,'b--',linewidth=2)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))-sigmaP,'b--',linewidth=2)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))+2*sigmaP,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))-2*sigmaP,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))+3*sigmaP,'--',color=(0.5,0.5,1))
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))-3*sigmaP,'--',color=(0.5,0.5,1))
plt.plot(np.arange(len(diffP)),diffP,'g+')
plt.ylim([-20*sigmaP,20*sigmaP])
plt.xlim([0,len(diffP)-1])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.text(0.6666*len(diffP), 1.1*sigmaP, r'$1\sigma$',fontsize='30',color='b')
plt.text(0.7708*len(diffP), 2.1*sigmaP, r'$2\sigma$',fontsize='30',color=(0.3,0.3,1))
plt.text(0.875*len(diffP), 3.1*sigmaP, r'$3\sigma$',fontsize='30',color=(0.5,0.5,1))
plt.xlabel(r'Receiver number',fontsize='14')
plt.ylabel(r'P waves arrival times residuals ($s$)',fontsize='14')
plt.title(r'Best model residuals',fontsize='14')
if swaves:
fig2 = plt.figure()
plt.hold(True)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))+sigmaS,'b--',linewidth=2)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))-sigmaS,'b--',linewidth=2)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))+2*sigmaS,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))-2*sigmaS,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))+3*sigmaS,'--',color=(0.5,0.5,1))
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))-3*sigmaS,'--',color=(0.5,0.5,1))
#plt.plot(np.arange(len(diffS)),diffDataBestModel[:,1],'g+')
plt.plot(np.arange(len(diffS)),diffS,'g+')
            plt.ylim([-20*sigmaS,20*sigmaS]) # use the S-wave picking uncertainty for the S residuals plot
plt.xlim([0,len(diffS)-1])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.text(0.6666*len(diffS), 1.1*sigmaS, r'$1\sigma$',fontsize='30',color='b')
plt.text(0.7708*len(diffS), 2.1*sigmaS, r'$2\sigma$',fontsize='30',color=(0.3,0.3,1))
plt.text(0.875*len(diffS), 3.1*sigmaS, r'$3\sigma$',fontsize='30',color=(0.5,0.5,1))
plt.xlabel(r'Receiver number',fontsize='14')
plt.ylabel(r'S waves arrival times residuals ($s$)',fontsize='14')
plt.title(r'Best model residuals',fontsize='14')
E=sum((diffP/sigmaP)**2/2)+sum((diffS/sigmaS)**2/2)+ep
else:
E=sum((diffP/sigmaP)**2/2)+ep
if args.verbose:
print "Energy of best model :",E
else:
print "The best model has not been calculated"
nBest=len(glob.glob1(args.pathToDir,"bestPprofile*"))
if args.verbose:
print "Number of good model kept : ",nBest
bestP=[0]*nBest
bestS=[0]*nBest
ii=0
iterationBest=[0]*nBest
EP=[0]*nBest
ES=[0]*nBest
for bestModel in glob.glob1(args.pathToDir,"bestPprofile*"):
ii=ii+1
chain=bestModel.split("chain")[1].split(".")[0]
idx=bestModel.split("idx")[1].split(".")[0]
iterationBest[ii-1]=int(idx)
EP[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestP[ii-1]=np.loadtxt(args.pathToDir+bestModel)
if args.verbose:
print "Model number : ",ii, " -> ",bestModel," generated by chain ",chain," at iteration ",idx," (energy "+str(EP[ii-1])+")"
ii=0
for bestModel in glob.glob1(args.pathToDir,"bestSprofile*"):
ii=ii+1
chain=bestModel.split("chain")[1].split(".")[0]
idx=bestModel.split("idx")[1].split(".")[0]
ES[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestS[ii-1]=np.loadtxt(args.pathToDir+bestModel)
if args.verbose:
print "Model number : ",ii, " -> ",bestModel," generated by chain ",chain," at iteration ",idx," (energy "+str(ES[ii-1])+")"
maxiP=globalMaxP[:,1].max()
miniP=globalMinP[:,1].min()
maxiS=globalMaxS[:,1].max()
miniS=globalMinS[:,1].min()
dp=(maxiP-miniP)/10
ds=(maxiS-miniS)/10
from operator import itemgetter
idxBestP=min(enumerate(EP), key=itemgetter(1))[0] # index of best model
idxBestS=min(enumerate(ES), key=itemgetter(1))[0] # index of best S model (it is the same one!)
plt.figure()
if analytical:
plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4,label="Real velocity profile")
if args.verbose:
print "Models kept after iteration : "+str(args.treshold)+" will be shown"
for i in np.arange(nBest):
if iterationBest[i] > args.treshold:
plt.hold(True)
if i == idxBestP:
plt.plot(bestP[i][:,1],zFilt,linewidth=4,label="Best model")
else:
plt.plot(bestP[i][:,1],zFilt)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
    plt.xlabel(r'Best P wave velocity models ($m.s^{-1}$)',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
plt.xlim(miniP-dp, maxiP+dp)
plt.ylim(ymax=z.max())
plt.gca().invert_yaxis()
plt.legend()
if swaves:
plt.figure()
if analytical:
plt.plot(realS[:,1],z,color=(0,0,0.5),linewidth=4,label="Real velocity profile")
for i in np.arange(nBest):
if iterationBest[i] > args.treshold:
plt.hold(True)
if i == idxBestS:
plt.plot(bestS[i][:,1],zFilt,linewidth=4,label="Best model")
else:
plt.plot(bestS[i][:,1],zFilt)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
        plt.xlabel(r'Best S wave velocity models ($m.s^{-1}$)',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
plt.xlim(miniS-ds, maxiS+ds)
plt.ylim(ymax=z.max())
plt.gca().invert_yaxis()
plt.legend()
################################ VP/VS ################################
if args.vpvs:
if not swaves:
print "Impossible to print Vp/Vs ratio as Swaves had not been calculated during the algorithm"
else:
for i in np.arange(nbt):
plt.figure()
plt.hold(True)
            vpFractionalUncertainty = 100*np.sqrt(varPs[i][:,1])/averagesP[i][:,1] # Ex: if vp = 2500 +/- 100 m/s, the fractional uncertainty is 4%: vp = 2500 +/- 4%
            vsFractionalUncertainty = 100*np.sqrt(varSs[i][:,1])/averagesS[i][:,1] # Ex: if vs = 2500 +/- 100 m/s, the fractional uncertainty is 4%: vs = 2500 +/- 4%
ratioFractionalUncertainty = vpFractionalUncertainty + vsFractionalUncertainty
meanRatio = averagesP[i][:,1]/averagesS[i][:,1]
numericalUncertainty = meanRatio*(vpFractionalUncertainty + vsFractionalUncertainty)/100
plt.plot(meanRatio + numericalUncertainty,zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
plt.plot(meanRatio - numericalUncertainty,zFilt,linestyle='--',color=(0.3,0.3,0.7))
plt.plot(meanRatio,zFilt,label="Average Vp/Vs for chain "+str(i))
# Load best profiles :
nBest=len(glob.glob1(args.pathToDir,"bestPprofile*"))
bestP=[0]*nBest
bestS=[0]*nBest
EP=[0]*nBest
ES=[0]*nBest
ii=0
for bestModel in glob.glob1(args.pathToDir,"bestPprofile*"):
ii=ii+1
EP[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestP[ii-1]=np.loadtxt(args.pathToDir+bestModel)
ii=0
for bestModel in glob.glob1(args.pathToDir,"bestSprofile*"):
ii=ii+1
ES[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestS[ii-1]=np.loadtxt(args.pathToDir+bestModel)
from operator import itemgetter
idxBestP=[]
idxBestS=[]
if EP:
idxBestP=min(enumerate(EP), key=itemgetter(1))[0] # index of best model
if ES:
idxBestS=min(enumerate(ES), key=itemgetter(1))[0] # index of best S model (it is the same one!)
# End of loading best profiles
if EP:
plt.plot(bestP[idxBestP][:,1]/bestS[idxBestS][:,1],zFilt,linewidth=4,label="Vp/Vs of the best model")
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'Ratio Vp/Vs',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
#plt.ylim(ymax=zFilt.max())
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
plt.legend()
plt.figure()
plt.hold(True)
        vpFractionalUncertainty = 100*np.sqrt(globalVarP[:,1])/globalAverageP[:,1] # Ex: if vp = 2500 +/- 100 m/s, the fractional uncertainty is 4%: vp = 2500 +/- 4%
        vsFractionalUncertainty = 100*np.sqrt(globalVarS[:,1])/globalAverageS[:,1] # Ex: if vs = 2500 +/- 100 m/s, the fractional uncertainty is 4%: vs = 2500 +/- 4%
ratioFractionalUncertainty = vpFractionalUncertainty + vsFractionalUncertainty
meanRatio = globalAverageP[:,1]/globalAverageS[:,1]
numericalUncertainty = meanRatio*(vpFractionalUncertainty + vsFractionalUncertainty)/100
plt.plot(meanRatio + np.sqrt(globalVarVpVs[:,1])/3,zFilt,linestyle='--',color=(0.3,0.5,0.7),label="Real standard deviation")
plt.plot(meanRatio - np.sqrt(globalVarVpVs[:,1])/3,zFilt,linestyle='--',color=(0.3,0.5,0.7))
plt.plot(meanRatio + numericalUncertainty,zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Approx standard deviation")
plt.plot(meanRatio - numericalUncertainty,zFilt,linestyle='--',color=(0.3,0.3,0.7))
plt.plot(meanRatio,zFilt,label="Global average Vp/Vs")
# Load best profiles :
nBest=len(glob.glob1(args.pathToDir,"bestPprofile*"))
bestP=[0]*nBest
bestS=[0]*nBest
EP=[0]*nBest
ES=[0]*nBest
ii=0
for bestModel in glob.glob1(args.pathToDir,"bestPprofile*"):
ii=ii+1
EP[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestP[ii-1]=np.loadtxt(args.pathToDir+bestModel)
ii=0
for bestModel in glob.glob1(args.pathToDir,"bestSprofile*"):
ii=ii+1
ES[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestS[ii-1]=np.loadtxt(args.pathToDir+bestModel)
from operator import itemgetter
idxBestP=[]
idxBestS=[]
if EP:
idxBestP=min(enumerate(EP), key=itemgetter(1))[0] # index of best model
if ES:
idxBestS=min(enumerate(ES), key=itemgetter(1))[0] # index of best S model (it is the same one!)
# End of loading best profiles
if EP:
plt.plot(bestP[idxBestP][:,1]/bestS[idxBestS][:,1],zFilt,linewidth=4,label="Vp/Vs of the best model")
if analytical:
plt.plot(realP[:,1]/realS[:,1],z,color=(0,0,0.5),linewidth=4,label="Real Vp/Vs")
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'Ratio Vp/Vs',fontsize='14')
plt.ylabel(r'Depth ($m$)',fontsize='14')
#plt.ylim(ymax=zFilt.max())
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
plt.legend()
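# Hedged aside (this helper is not called anywhere): the Vp/Vs error bars above use first-order
# propagation for a quotient, sigma_ratio ~ (vp/vs) * (sigma_vp/vp + sigma_vs/vs). Summing the
# fractional uncertainties is slightly conservative compared with adding them in quadrature.
def _vpvs_uncertainty(vp, vs, sigma_vp, sigma_vs):
    ratio = vp / vs
    return ratio * (sigma_vp / vp + sigma_vs / vs)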
if args.filtering:
for i,filteredCurve in enumerate(filteredPcurve):
fig = plt.figure(figsize=(4,6))
plt.hold(True)
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95))
#if analytical:
# plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4)
plt.plot(filteredCurve[:,1],z,color=(0.5,0,0),linewidth=4,label=waveletFiltered[i])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'Wave speed (m.s$^{-1}$)',fontsize=16)
plt.ylabel(r'Depth (m)',fontsize=16)
plt.ylim(ymax=z.max())
plt.gca().invert_yaxis()
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.locator_params(nbins=5)
plt.legend()
################################ SWAPS ################################
if args.swaps:
print "Not implemented for now"
plt.figure()
plt.plot(ll[ll[:,2]==3,1],ll[ll[:,1]==4,2])
#plot(exch(exch(:,2)==3,1),exch(exch(:,2)==3,3),'k')
# plot(exch(exch(:,2)==3,1),exch(exch(:,2)==3,3),'y')
# plot(exch(exch(:,2)==2,1),exch(exch(:,2)==2,3),'c')
#plot(exch(exch(:,2)==1,1),exch(exch(:,2)==1,3),'r')
#plot(exch(exch(:,2)==0,1),exch(exch(:,2)==0,3),'b')
################################ PAPER ################################
if args.paper:
### GEOMETRY ###
fig = plt.figure()
ax = fig.gca(projection='3d') #Axes3D(fig)
ax.hold(True)
ax.scatter(coordStats[:,0],coordStats[:,1],coordStats[:,2],zdir='z',s=20,c='b')
if (coordShots.size>3):
ax.scatter(coordShots[:,0],coordShots[:,1],coordShots[:,2],zdir='z',s=20,c='r',marker='^')
else:
ax.scatter(coordShots[0],coordShots[1],coordShots[2],zdir='z',s=200,c='r',marker='^')
ax.set_xlim3d(xmin,xmax)
ax.set_ylim3d(ymin,ymax)
ax.set_zlim3d(zmin,zmax)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.invert_zaxis()
### LOGS AND PRIOR ###
fontsize=18
fig2 = plt.figure(figsize=(6,8))
plt.gcf().subplots_adjust(left=0.15)
plt.hold(True)
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95))
if (swaves):
plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5))
if analytical:
plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4)
if swaves:
plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Wave speed (m.s$^{-1}$)',fontsize=fontsize)
plt.ylabel('Depth (m)',fontsize=fontsize)
plt.tick_params(axis='x', labelsize=fontsize-2)
plt.tick_params(axis='y', labelsize=fontsize-2)
plt.ylim(ymax=z.max())
plt.gca().invert_yaxis()
if nPriorProfiles > 0:
plt.figure(figsize=(6,8))
plt.gcf().subplots_adjust(left=0.15)
plt.hold(True)
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95),alpha=0.5,label="P wave sonic log")
for i in np.arange(nPriorProfiles):
if i%3 == 1:
if i == 1:
plt.plot(priorP[i][:,1],z,'m',label="P wave velocity profiles from prior")
else:
plt.plot(priorP[i][:,1],z,'m')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Velocity (m.s$^{-1}$)',fontsize=fontsize)
plt.ylabel('Depth (m)',fontsize=fontsize)
plt.tick_params(axis='x', labelsize=fontsize-2)
plt.tick_params(axis='y', labelsize=fontsize-2)
plt.ylim(ymin=z.max())
plt.ylim(ymax=z.min())
plt.legend(loc=0,fontsize=16)
if swaves:
plt.figure(figsize=(6,8))
plt.gcf().subplots_adjust(left=0.15)
plt.hold(True)
plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5),alpha=0.5,label="S wave sonic log")
for i in np.arange(nPriorProfiles):
if i%3 == 1:
if i == 1:
plt.plot(priorS[i][:,1],z,'y',label="S wave velocity profiles from prior")
else:
plt.plot(priorS[i][:,1],z,'y')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Velocity (m.s$^{-1}$)',fontsize=fontsize)
plt.ylabel('Depth (m)',fontsize=fontsize)
plt.ylim(ymin=z.max())
plt.ylim(ymax=z.min())
plt.tick_params(axis='x', labelsize=fontsize-2)
plt.tick_params(axis='y', labelsize=fontsize-2)
plt.legend(loc=0,fontsize=16)
### ENERGY ###
plt.figure(figsize=(8,6))
plt.hold(True)
ii=0
nBest=len(glob.glob1(args.pathToDir,"bestPprofile*"))
E=[0]*nBest
iterationBest=[0]*nBest
chain=[0]*nBest
for bestModel in glob.glob1(args.pathToDir,"bestSprofile*"):
ii=ii+1
iterationBest[ii-1]=int(bestModel.split("idx")[1].split(".")[0])
chain[ii-1]=int(bestModel.split("chain")[1].split(".")[0])
E[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
# TODO : this does not work if the energy value contains the code of the run : ex code=745 energy=745.23
if args.verbose:
print "Models kept after iteration : "+str(args.treshold)+" will be shown"
from operator import itemgetter
idxBest=min(enumerate(E), key=itemgetter(1))[0] # index of best model
itBestE=iterationBest[idxBest]
chainBest=chain[idxBest]
chain=[chain[i] for i in np.arange(nBest) if iterationBest[i] > args.treshold]
iterationBest=[i for i in iterationBest if i>args.treshold]
iteration=np.arange(nit)
for i in np.arange(nbt):
if i<20:
if len(iteration) == len(chains[i][:,-1]):
plt.semilogy(iteration,chains[i][:,-1]*T[i])
else:
miniIteration = min(len(iteration),len(chains[i][:,-1]))
plt.semilogy(iteration[:miniIteration],chains[i][:miniIteration,-1]*T[i])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
flagForLabel=True
plt.semilogy(itBestE, T[chainBest]*chains[chainBest][:,-1][itBestE], 'rD', label="Best model")
if recalculate_t0 is True:
if swaves:
plt.semilogy(iteration,np.zeros(nit)+nStats*nShots+ep,'b--',linewidth=2) #,label=r'Behind that line every model can be acceptable ($1\sigma$ misfit for each measurement)')
else:
plt.semilogy(iteration,np.zeros(nit)+nStats*nShots/2.0+ep,'b--',linewidth=2)
plt.semilogy(iteration,np.zeros(nit)+ep,label="Prior's energy")
plt.xlim(xmax=iteration.max())
plt.rc('font', family='serif')
plt.xlabel('Iteration',fontsize=18)
#plt.ylim(ymin=1500,ymax=2300)
plt.ylabel('Energy',fontsize=18)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.legend(loc=0,numpoints=1,fontsize=16)
### VP/VS ###
plt.figure(figsize=(8,10))
plt.hold(True)
plt.plot(firstGuessP[:,1]/firstGuessS[:,1],z,color=(0.95,0.9,0.9),linewidth=1,label="Sonic logs Vp/Vs", zorder=1)
    vpFractionalUncertainty = 100*np.sqrt(globalVarP[:,1])/globalAverageP[:,1] # Ex: if vp = 2500 +/- 100 m/s, the fractional uncertainty is 4%: vp = 2500 +/- 4%
    vsFractionalUncertainty = 100*np.sqrt(globalVarS[:,1])/globalAverageS[:,1] # Ex: if vs = 2500 +/- 100 m/s, the fractional uncertainty is 4%: vs = 2500 +/- 4%
ratioFractionalUncertainty = vpFractionalUncertainty + vsFractionalUncertainty
meanRatio = globalAverageP[:,1]/globalAverageS[:,1]
numericalUncertainty = meanRatio*(vpFractionalUncertainty + vsFractionalUncertainty)/100
#pdense(zFilt,meanRatio,numericalUncertainty,M)
pdense(zFilt,meanRatio,np.sqrt(globalVarVpVs[:,1])/3,M)
plt.plot(meanRatio + np.sqrt(globalVarVpVs[:,1])/3,zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
plt.plot(meanRatio - np.sqrt(globalVarVpVs[:,1])/3,zFilt,linestyle='--',color=(0.3,0.3,0.7))
#plt.plot(meanRatio + numericalUncertainty,zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
#plt.plot(meanRatio - numericalUncertainty,zFilt,linestyle='--',color=(0.3,0.3,0.7))
plt.plot(meanRatio,zFilt,label="Average Vp/Vs")
# Load best profiles :
nBest=len(glob.glob1(args.pathToDir,"bestPprofile*"))
bestP=[0]*nBest
bestS=[0]*nBest
EP=[0]*nBest
ES=[0]*nBest
ii=0
for bestModel in glob.glob1(args.pathToDir,"bestPprofile*"):
ii=ii+1
EP[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestP[ii-1]=np.loadtxt(args.pathToDir+bestModel)
ii=0
for bestModel in glob.glob1(args.pathToDir,"bestSprofile*"):
ii=ii+1
ES[ii-1]=float(bestModel.split("E")[1].split(code)[0].strip("."))
bestS[ii-1]=np.loadtxt(args.pathToDir+bestModel)
from operator import itemgetter
idxBestP=min(enumerate(EP), key=itemgetter(1))[0] # index of best model
idxBestS=min(enumerate(ES), key=itemgetter(1))[0] # index of best S model (it is the same one!)
# End of loading best profiles
plt.plot(bestP[idxBestP][:,1]/bestS[idxBestS][:,1],zFilt,'g',linewidth=2,label="Vp/Vs of the best model")
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'Ratio Vp/Vs',fontsize=18)
plt.ylabel(r'Depth (m)',fontsize=18)
#plt.ylim(ymax=zFilt.max())
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
plt.xlim([0.5,3.5])
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.legend(loc=4,fontsize=16)
### RESIDUALS ###
if bestModelCalculated:
diffDataBestModel=bestModelTimes-timesData
if recalculate_t0 or args.recalculate_t0:
for i in np.arange(nShots):
diffPshoti=diffDataBestModel[i*nStats:(i+1)*nStats,0][timesData[i*nStats:(i+1)*nStats,0]>0]
t0ShotsPi=diffPshoti.mean()
diffDataBestModel[i*nStats:(i+1)*nStats,0]=diffDataBestModel[i*nStats:(i+1)*nStats,0]-t0ShotsPi
if swaves:
diffSshoti=diffDataBestModel[i*nStats:(i+1)*nStats,1][timesData[i*nStats:(i+1)*nStats,1]>0]
t0ShotsSi=diffSshoti.mean()
diffDataBestModel[i*nStats:(i+1)*nStats,1]=diffDataBestModel[i*nStats:(i+1)*nStats,1]-t0ShotsSi
diffP=diffDataBestModel[:,0][timesData[:,0]>0]
diffS=diffDataBestModel[:,1][timesData[:,1]>0]
fig = plt.figure()
plt.hold(True)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))+sigmaP,'b--',linewidth=2)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))-sigmaP,'b--',linewidth=2)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))+2*sigmaP,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))-2*sigmaP,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))+3*sigmaP,'--',color=(0.5,0.5,1))
plt.plot(np.arange(len(diffP)),np.zeros(len(diffP))-3*sigmaP,'--',color=(0.5,0.5,1))
plt.plot(np.arange(len(diffP)),diffP,'g+')
plt.ylim([-10*sigmaP,10*sigmaP])
plt.xlim([0,len(diffP)-1])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.text(0.6666*len(diffP), 1.1*sigmaP, r'$1\sigma$',fontsize=30,color='b')
plt.text(0.7708*len(diffP), 2.1*sigmaP, r'$2\sigma$',fontsize=30,color=(0.3,0.3,1))
plt.text(0.875*len(diffP), 3.1*sigmaP, r'$3\sigma$',fontsize=30,color=(0.5,0.5,1))
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.xlabel('Receiver number',fontsize=18)
plt.ylabel('P waves arrival times residuals (s)',fontsize=18)
if swaves:
fig2 = plt.figure()
plt.hold(True)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))+sigmaS,'b--',linewidth=2)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))-sigmaS,'b--',linewidth=2)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))+2*sigmaS,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))-2*sigmaS,'--',color=(0.3,0.3,1),linewidth=1.5)
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))+3*sigmaS,'--',color=(0.5,0.5,1))
plt.plot(np.arange(len(diffS)),np.zeros(len(diffS))-3*sigmaS,'--',color=(0.5,0.5,1))
#plt.plot(np.arange(len(diffS)),diffDataBestModel[:,1],'g+')
plt.plot(np.arange(len(diffS)),diffS,'g+')
            plt.ylim([-10*sigmaS,10*sigmaS]) # use the S-wave picking uncertainty for the S residuals plot
plt.xlim([0,len(diffS)-1])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.text(0.6666*len(diffS), 1.1*sigmaS, r'$1\sigma$',fontsize=30,color='b')
plt.text(0.7708*len(diffS), 2.1*sigmaS, r'$2\sigma$',fontsize=30,color=(0.3,0.3,1))
plt.text(0.875*len(diffS), 3.1*sigmaS, r'$3\sigma$',fontsize=30,color=(0.5,0.5,1))
plt.xlabel('Receiver number',fontsize=18)
plt.ylabel('S waves arrival times residuals (s)',fontsize=18)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
E=sum((diffP/sigmaP)**2/2)+sum((diffS/sigmaS)**2/2)+ep
else:
E=sum((diffP/sigmaP)**2/2)+ep
else:
print "The best model has not been calculated"
### RESULTS ###
# lb=qp+"\% confidence interval"
# maxiP=globalMaxP[:,1].max()
# miniP=globalMinP[:,1].min()
# maxiS=globalMaxS[:,1].max()
# miniS=globalMinS[:,1].min()
# plt.figure(figsize=(8,10))
# plt.hold(True)
# plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95),linewidth=1,label="Sonic log", alpha=0.4, zorder=1)
# if analytical:
# plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4,label="Real velocity profile")
# plt.plot(globalAverageP[:,1],zFilt,linewidth=4,label="Global average")
# pdense(zFilt,globalAverageP[:,1],np.sqrt(globalVarP[:,1]),M)
# plt.plot(globalAverageP[:,1]+np.sqrt(globalVarP[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
# plt.plot(globalAverageP[:,1]-np.sqrt(globalVarP[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7))
# plt.plot(bestP[idxBestP][:,1],zFilt,linewidth=4,label="Best model")
# if args.show_ranges:
# plt.plot(globalMaxP[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
# plt.plot(globalMinP[:,1],zFilt,color=(1,0,0))
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel('P wave velocity (m.s$^{-1}$)',fontsize=18)
# plt.ylabel('Depth (m)',fontsize=18)
# plt.xlim(2500, 6500)
# plt.legend(fontsize=16)
# plt.ylim(ymin=zFilt.max())
# plt.ylim(ymax=zFilt.min())
# plt.tick_params(axis='x', labelsize=16)
# plt.tick_params(axis='y', labelsize=16)
# if swaves:
# plt.figure(figsize=(8,10))
# if args.show_ranges:
# plt.plot(globalMaxP[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
# plt.plot(globalMinP[:,1],zFilt,color=(1,0,0))
# plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5),linewidth=1,label="Sonic log", alpha=0.4, zorder=1)
# if analytical:
# plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4,label="Real velocity profile")
# plt.plot(globalAverageS[:,1],zFilt,linewidth=4,label="Global average")
# pdense(zFilt,globalAverageS[:,1],np.sqrt(globalVarS[:,1]),M)
# plt.plot(globalAverageS[:,1]+np.sqrt(globalVarS[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
# plt.plot(globalAverageS[:,1]-np.sqrt(globalVarP[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7))
# plt.plot(bestS[idxBestS][:,1],zFilt,linewidth=4,label="Best model")
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel('S wave velocity (m.s$^{-1}$)',fontsize=18)
# plt.ylabel('Depth (m)',fontsize=18)
# plt.xlim(1000,5000)
# plt.legend(fontsize=16)
# plt.ylim(ymin=zFilt.max())
# plt.ylim(ymax=zFilt.min())
# plt.tick_params(axis='x', labelsize=16)
# plt.tick_params(axis='y', labelsize=16)
### RESULTS ###
lb=qp+"\% confidence interval"
maxiP=globalMaxP[:,1].max()
miniP=globalMinP[:,1].min()
maxiS=globalMaxS[:,1].max()
miniS=globalMinS[:,1].min()
plt.figure(figsize=(8,10))
plt.hold(True)
plt.plot(firstGuessP[:,1],z,color=(0.9,0.9,0.99),linewidth=1,label="Sonic log", zorder=1)
#plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95),linewidth=1,label="Sonic log", alpha=0.2, zorder=1)
if analytical:
plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4,label="Real velocity profile")
pdense(zFilt,globalAverageP[:,1],np.sqrt(globalVarP[:,1]),M)
#pdense(zFilt,(qSupPs[i][:,1]+qInfPs[i][:,1])/2,(qSupPs[i][:,1]-qInfPs[i][:,1])/2,M)
#plt.plot(qSupPs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7),label=lb)
#plt.plot(qInfPs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7))
plt.plot(globalAverageP[:,1],zFilt,linewidth=2,label="Average model")
plt.plot(bestP[idxBestP][:,1],zFilt,linewidth=2,label="Best model")
plt.plot(globalAverageP[:,1]+np.sqrt(globalVarP[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
plt.plot(globalAverageP[:,1]-np.sqrt(globalVarP[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7))
if args.show_ranges:
plt.plot(globalMaxP[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
plt.plot(globalMinP[:,1],zFilt,color=(1,0,0))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('P wave velocity (m.s$^{-1}$)',fontsize=18)
plt.ylabel('Depth (m)',fontsize=18)
plt.xlim(2000, 7500)
plt.legend(loc=0,fontsize=16)
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
if swaves:
plt.figure(figsize=(8,10))
if args.show_ranges:
plt.plot(maxS[i][:,1],zFilt,color=(0.4,0.8,0.8),label="Range investigated by chain "+str(i))
plt.plot(minS[i][:,1],zFilt,color=(0.4,0.8,0.8))
plt.plot(firstGuessS[:,1],z,color=(0.89,0.98,0.89),linewidth=1,label="Sonic log",zorder=1)
if analytical:
plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4,label="Real velocity profile")
#pdense(zFilt,(qSupSs[i][:,1]+qInfSs[i][:,1])/2,(qSupSs[i][:,1]-qInfSs[i][:,1])/2,M)
pdense(zFilt,globalAverageS[:,1],np.sqrt(globalVarS[:,1]),M)
#plt.plot(qSupSs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7),label=lb)
#plt.plot(qInfSs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7))
plt.plot(globalAverageS[:,1],zFilt,linewidth=2,label="Average model")
plt.plot(bestS[idxBestS][:,1],zFilt,linewidth=2,label="Best model")
plt.plot(globalAverageS[:,1]+np.sqrt(globalVarS[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7),label="Standard deviation")
plt.plot(globalAverageS[:,1]-np.sqrt(globalVarS[:,1]),zFilt,linestyle='--',color=(0.3,0.3,0.7))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('S wave velocity (m.s$^{-1}$)',fontsize=18)
plt.ylabel('Depth (m)',fontsize=18)
plt.xlim(500,4500)
plt.legend(fontsize=16,loc=0)
plt.ylim(ymin=zFilt.max())
plt.ylim(ymax=zFilt.min())
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
### Filtered Curves ###
for i,filteredCurve in enumerate(filteredPcurve):
fig = plt.figure(figsize=(4,6))
plt.gcf().subplots_adjust(left=0.22)
plt.hold(True)
plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95))
#if analytical:
# plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4)
plt.plot(filteredCurve[:,1],z,color=(0.5,0,0),linewidth=4,label=waveletFiltered[i])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'Wave speed (m.s$^{-1}$)',fontsize=18)
plt.ylabel(r'Depth (m)',fontsize=18)
plt.ylim(ymax=z.max())
plt.gca().invert_yaxis()
plt.legend(fontsize=16)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.locator_params(nbins=5)
plt.show()
# ### RESULTS ###
# lb=qp+"\% confidence interval"
# maxiP=globalMaxP[:,1].max()
# miniP=globalMinP[:,1].min()
# maxiS=globalMaxS[:,1].max()
# miniS=globalMinS[:,1].min()
# for i in [2]:
# plt.figure(figsize=(8,10))
# plt.hold(True)
# plt.plot(firstGuessP[:,1],z,color=(0.5,0.5,0.95),linewidth=1,label="Sonic log", alpha=0.4, zorder=1)
# if analytical:
# plt.plot(realP[:,1],z,color=(0,0,0.5),linewidth=4,label="Real velocity profile")
# pdense(zFilt,(qSupPs[i][:,1]+qInfPs[i][:,1])/2,(qSupPs[i][:,1]-qInfPs[i][:,1])/2,M)
# plt.plot(qSupPs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7),label=lb)
# plt.plot(qInfPs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7))
# plt.plot(averagesP[i][:,1],zFilt,linewidth=4,label="Average model")
# plt.plot(bestP[idxBestP][:,1],zFilt,linewidth=4,label="Best model")
# # plt.plot(averagesP[i][:,1]+np.sqrt(varPs[i][:,1]),zFilt,color=(0.5,0.5,0),label="Standard deviation")
# # plt.plot(averagesP[i][:,1]-np.sqrt(varPs[i][:,1]),zFilt,color=(0.5,0.5,0))
# if args.show_ranges:
# plt.plot(globalMaxP[:,1],zFilt,color=(1,0,0),label="Range investigated by all chains")
# plt.plot(globalMinP[:,1],zFilt,color=(1,0,0))
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel('P wave velocity (m.s$^{-1}$)',fontsize=18)
# plt.ylabel('Depth (m)',fontsize=18)
# plt.xlim(2500, 6500)
# plt.legend(fontsize=16)
# plt.ylim(ymin=zFilt.max())
# plt.ylim(ymax=zFilt.min())
# plt.tick_params(axis='x', labelsize=16)
# plt.tick_params(axis='y', labelsize=16)
# if swaves:
# plt.figure(figsize=(8,10))
# if args.show_ranges:
# plt.plot(maxS[i][:,1],zFilt,color=(0.4,0.8,0.8),label="Range investigated by chain "+str(i))
# plt.plot(minS[i][:,1],zFilt,color=(0.4,0.8,0.8))
# plt.plot(firstGuessS[:,1],z,color=(0.5,0.95,0.5),linewidth=1,label="Sonic log", alpha=0.4, zorder=1)
# if analytical:
# plt.plot(realS[:,1],z,color=(0,0.5,0),linewidth=4,label="Real velocity profile")
# pdense(zFilt,(qSupSs[i][:,1]+qInfSs[i][:,1])/2,(qSupSs[i][:,1]-qInfSs[i][:,1])/2,M)
# plt.plot(qSupSs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7),label=lb)
# plt.plot(qInfSs[i][:,1],zFilt,linestyle='--',color=(0.3,0.3,0.7))
# plt.plot(averagesS[i][:,1],zFilt,linewidth=4,label="Average model")
# plt.plot(bestS[idxBestS][:,1],zFilt,linewidth=4,label="Best model")
# # plt.plot(averagesS[i][:,1]+np.sqrt(varSs[i][:,1]),zFilt,color=(0.5,0.5,0),label="Standard deviation")
# # plt.plot(averagesS[i][:,1]-np.sqrt(varSs[i][:,1]),zFilt,color=(0.5,0.5,0))
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel('S wave velocity (m.s$^{-1}$)',fontsize=18)
# plt.ylabel('Depth (m)',fontsize=18)
# plt.xlim(1000,5000)
# plt.legend(fontsize=16)
# plt.ylim(ymin=zFilt.max())
# plt.ylim(ymax=zFilt.min())
# plt.tick_params(axis='x', labelsize=16)
# plt.tick_params(axis='y', labelsize=16)
|
|
from twilio.jwt.access_token import AccessTokenGrant
import warnings
import functools
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return new_func
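# Hedged usage sketch (not part of the original module): the decorator above leaves the wrapped
# function callable but emits a DeprecationWarning on every call. The helper below is ours,
# purely for illustration.
def _example_deprecated_usage():
    @deprecated
    def old_helper():
        return 42
    return old_helper()  # warns "Call to deprecated function old_helper." and returns 42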
class ChatGrant(AccessTokenGrant):
"""Grant to access Twilio Chat"""
def __init__(self, service_sid=None, endpoint_id=None,
deployment_role_sid=None, push_credential_sid=None):
self.service_sid = service_sid
self.endpoint_id = endpoint_id
self.deployment_role_sid = deployment_role_sid
self.push_credential_sid = push_credential_sid
@property
def key(self):
return "chat"
def to_payload(self):
grant = {}
if self.service_sid:
grant['service_sid'] = self.service_sid
if self.endpoint_id:
grant['endpoint_id'] = self.endpoint_id
if self.deployment_role_sid:
grant['deployment_role_sid'] = self.deployment_role_sid
if self.push_credential_sid:
grant['push_credential_sid'] = self.push_credential_sid
return grant
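# Hedged usage sketch (not part of the original module): the payload a ChatGrant serializes to.
# The service SID below is a placeholder; in practice the grant is attached to an AccessToken
# from this package via token.add_grant(...) before token.to_jwt() is called.
def _example_chat_grant_payload():
    grant = ChatGrant(service_sid='ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
    return grant.to_payload()  # -> {'service_sid': 'ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'}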
class IpMessagingGrant(AccessTokenGrant):
"""Grant to access Twilio IP Messaging"""
@deprecated
def __init__(self, service_sid=None, endpoint_id=None,
deployment_role_sid=None, push_credential_sid=None):
self.service_sid = service_sid
self.endpoint_id = endpoint_id
self.deployment_role_sid = deployment_role_sid
self.push_credential_sid = push_credential_sid
@property
def key(self):
return "ip_messaging"
def to_payload(self):
grant = {}
if self.service_sid:
grant['service_sid'] = self.service_sid
if self.endpoint_id:
grant['endpoint_id'] = self.endpoint_id
if self.deployment_role_sid:
grant['deployment_role_sid'] = self.deployment_role_sid
if self.push_credential_sid:
grant['push_credential_sid'] = self.push_credential_sid
return grant
class SyncGrant(AccessTokenGrant):
"""Grant to access Twilio Sync"""
def __init__(self, service_sid=None, endpoint_id=None):
self.service_sid = service_sid
self.endpoint_id = endpoint_id
@property
def key(self):
return "data_sync"
def to_payload(self):
grant = {}
if self.service_sid:
grant['service_sid'] = self.service_sid
if self.endpoint_id:
grant['endpoint_id'] = self.endpoint_id
return grant
class VoiceGrant(AccessTokenGrant):
"""Grant to access Twilio Programmable Voice"""
def __init__(self,
incoming_allow=None,
outgoing_application_sid=None,
outgoing_application_params=None,
push_credential_sid=None,
endpoint_id=None):
self.incoming_allow = incoming_allow
""" :type : bool """
self.outgoing_application_sid = outgoing_application_sid
""" :type : str """
self.outgoing_application_params = outgoing_application_params
""" :type : dict """
self.push_credential_sid = push_credential_sid
""" :type : str """
self.endpoint_id = endpoint_id
""" :type : str """
@property
def key(self):
return "voice"
def to_payload(self):
grant = {}
if self.incoming_allow is True:
grant['incoming'] = {}
grant['incoming']['allow'] = True
if self.outgoing_application_sid:
grant['outgoing'] = {}
grant['outgoing']['application_sid'] = self.outgoing_application_sid
if self.outgoing_application_params:
grant['outgoing']['params'] = self.outgoing_application_params
if self.push_credential_sid:
grant['push_credential_sid'] = self.push_credential_sid
if self.endpoint_id:
grant['endpoint_id'] = self.endpoint_id
return grant
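# Hedged usage sketch (not part of the original module): VoiceGrant nests the outgoing call
# settings under an "outgoing" key. The application SID and params below are placeholders.
def _example_voice_grant_payload():
    grant = VoiceGrant(incoming_allow=True,
                       outgoing_application_sid='APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
                       outgoing_application_params={'to': '+15550100'})
    return grant.to_payload()
    # -> {'incoming': {'allow': True},
    #     'outgoing': {'application_sid': 'APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
    #                  'params': {'to': '+15550100'}}}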
class ConversationsGrant(AccessTokenGrant):
"""Grant to access Twilio Conversations"""
@deprecated
def __init__(self, configuration_profile_sid=None):
self.configuration_profile_sid = configuration_profile_sid
@property
def key(self):
return "rtc"
def to_payload(self):
grant = {}
if self.configuration_profile_sid:
grant['configuration_profile_sid'] = self.configuration_profile_sid
return grant
class VideoGrant(AccessTokenGrant):
"""Grant to access Twilio Video"""
def __init__(self, room=None):
self.room = room
@property
def key(self):
return "video"
def to_payload(self):
grant = {}
if self.room:
grant['room'] = self.room
return grant
class TaskRouterGrant(AccessTokenGrant):
"""Grant to access Twilio TaskRouter"""
def __init__(self, workspace_sid=None, worker_sid=None, role=None):
self.workspace_sid = workspace_sid
self.worker_sid = worker_sid
self.role = role
@property
def key(self):
return "task_router"
def to_payload(self):
grant = {}
if self.workspace_sid:
grant['workspace_sid'] = self.workspace_sid
if self.worker_sid:
grant['worker_sid'] = self.worker_sid
if self.role:
grant['role'] = self.role
return grant
class PlaybackGrant(AccessTokenGrant):
"""Grant to access Twilio Live stream"""
def __init__(self, grant=None):
"""Initialize a PlaybackGrant with a grant retrieved from the Twilio API."""
self.grant = grant
@property
def key(self):
"""Return the grant's key."""
return "player"
def to_payload(self):
"""Return the grant."""
return self.grant
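# Hedged end-to-end sketch (not part of the original module): a grant only takes effect once it
# is attached to an AccessToken from this package. The SIDs, secret and identity below are
# placeholders, and the exact AccessToken constructor signature should be checked against the
# installed twilio release.
def _example_token_with_video_grant():
    from twilio.jwt.access_token import AccessToken
    token = AccessToken('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',  # account SID (placeholder)
                        'SKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',  # API key SID (placeholder)
                        'your_api_secret',
                        identity='alice')
    token.add_grant(VideoGrant(room='example-room'))
    return token.to_jwt()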
|
|
#!/usr/bin/env python
from __future__ import unicode_literals
"""
Copyright (C) 2012 Legoktm
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
CONVERT_AMPM = {'AM':'a.m.', 'PM':'p.m.'}
import re
import datetime
import requests
import bs4
import pywikibot
import mwparserfromhell
SITE = pywikibot.Site()
HEADERS = {'User-Agent':'HurricaneInfoUpdaterBot - http://enwp.org/User:Legobot'}
COMMENT_THINGY = "<!-- SAME DAY, EDT AND UTC: TIME EDT (TIME UTC) DATE. DIFFERENT DAYS, EDT AND UTC: TIME EDT DATE EDT (TIME UTC DATE UTC) ///NOTICE THE DATE UTC INSIDE THE PARENTHESIS NOT OUTSIDE-->"
class Hurricane:
"""
A class which should represent each storm
"""
def __init__(self, url, wikipage):
"""
@param wikipage Wikipedia page of storm
@type wikipage pywikibot.Page
@param url url to the NHC report
@type url unicode
"""
self.url = url
self.wikipage = wikipage
def fetch_info(self):
r = requests.get(self.url, headers=HEADERS)
        r.raise_for_status()  # a bare "raise" outside an except block is invalid; let requests surface the HTTP error
        soup = bs4.BeautifulSoup(r.text, 'html.parser')  # name the parser explicitly to avoid bs4's default-parser warning
self.NHC_report = soup.body.pre.string
def parse_info(self):
for line in self.NHC_report.splitlines():
if line.startswith('LOCATION...'):
self.LOCATION = {'latc':line[11:15], 'latd':line[15],
'lonc':line[17:21], 'lond':line[21]}
elif line.startswith('ABOUT'):
self.ABOUT = line
elif line.startswith('PRESENT MOVEMENT'):
self.MOVEMENT = self.parse_movement(line)
elif line.startswith('MINIMUM CENTRAL PRESSURE'):
self.PRESSURE = self.parse_pressure(line)
elif line.startswith('MAXIMUM SUSTAINED WINDS'):
if not hasattr(self, 'WINDS'):
self.WINDS = self.parse_wind(line)
self.CATEGORY = self.determine_category(self.WINDS['mph'])
elif line.startswith('SUMMARY') and 'UTC' in line:
if not hasattr(self, 'UTC_TIMESTAMP'):
self.UTC_TIMESTAMP = self.format_timestamp(self.parse_timestamp(line))+ COMMENT_THINGY
def parse_movement(self, line):
if 'STATIONARY' in line:
return 'STATIONARY'
line=line[19:]
match = re.match('(\w{1,3}) OR (\d{1,3}) DEGREES AT (\d{1,3}) MPH...(\d{1,3}) KM/H', line)
d = {
'direction':match.group(1),
'degrees':match.group(2),
'mph':match.group(3),
'kmh':match.group(4),
}
d['knots'] = self.kmhtkt(int(d['kmh']))
return d
def kmhtkt(self, kmh):
"""
Convert km/h --> knots
"""
        constant = 1.852  # 1 knot = 1.852 km/h
return float(kmh)/constant
def format_movement(self, data):
        if not isinstance(data, dict):  # the 'STATIONARY' marker is a unicode literal here, so a plain str check never matches
return 'Stationary'
data['knots'] = int(self.kmhtkt(data['kmh']))
return '%(direction)s at %(knots)s kt (%(mph)s mph; %(kmh)s km/h)' % data
def parse_pressure(self, line):
line=line[27:]
s=line.split('...')
mb = s[0].split(' ')[0]
inch = s[1].split(' ')[0]
return {'mbar':mb,'inch':inch}
def format_pressure(self, data):
return '%(mbar)s [[mbar]] ([[hPa]]; %(inch)s [[inHg]])' % data
def parse_wind(self, line):
line=line[26:]
find = line.find('MPH')
mph = int(line[:find-1])
find2 = line.find('KM/H')
kmh = int(line[find+6:find2-1])
return {'mph':mph, 'kmh':kmh}
def format_wind(self, data):
return '%(mph)s mph (%(kmh)s km/h)' % data
def determine_category(self, mph):
if mph <= 38:
return 'depression'
elif mph <= 73:
return 'storm'
elif mph <= 95:
return 'cat1'
elif mph <= 110:
return 'cat2'
elif mph <= 129:
return 'cat3'
elif mph <= 156:
return 'cat4'
else:
return 'cat5'
def parse_timestamp(self, line):
print line
find = line.find('UTC')
time = line[find-5:find-1]
day = datetime.date.today()
dt = datetime.datetime(day.year, day.month, day.day, int(time[:2]), int(time[2:]))
self.TIMESTAMP = dt
return dt
def format_timestamp(self, dt):
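        # dt is the UTC time parsed from the advisory; EDT is UTC-4, so the "AST"
        # variables below actually hold EDT values (see COMMENT_THINGY above).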
AST = dt - datetime.timedelta(hours=4)
AST_AMPM = CONVERT_AMPM[AST.strftime('%p')]
AST_HR = str(int(AST.strftime('%I')))
DATE = dt.strftime('%B %d')
AST_DATE = AST.strftime('%B %d')
UTC_TIME = dt.strftime('%H%M')
if AST_DATE == DATE:
return AST_HR + ' ' + AST_AMPM + ' [[Eastern Daylight Time|EDT]] (' + UTC_TIME + ' [[Coordinated Universal Time|UTC]]) ' + DATE
else:
return AST_HR + ' ' + AST_AMPM + ' ' + AST_DATE + ' [[Eastern Daylight Time|EDT]] (' + UTC_TIME + ' '+ DATE + ' [[Coordinated Universal Time|UTC]])'
def update(self, push=True):
self.fetch_info()
self.parse_info()
print self.LOCATION
print self.CATEGORY
print self.ABOUT
print self.MOVEMENT
print self.PRESSURE
print self.WINDS
#print self.UTC_TIMESTAMP
#actually update crap
#return
text = self.wikipage.get()
code = mwparserfromhell.parse(text)
main = pywikibot.Page(self.wikipage.site, '2012 Atlantic hurricane season')
main_text = main.get()
main_code = mwparserfromhell.parse(main_text)
for template in code.filter_templates():
name = template.name.lower().strip()
if name == 'Infobox hurricane current'.lower():
if template.get('name').value.strip() == 'Hurricane Sandy':
template.get('time').value = self.UTC_TIMESTAMP
template.get('category').value = self.CATEGORY
template.get('gusts').value = self.format_wind(self.WINDS)
template.get('lat').value = self.LOCATION['latc']
template.get(1).value = self.LOCATION['latd']
template.get('lon').value = self.LOCATION['lonc']
template.get(2).value = self.LOCATION['lond']
template.get('movement').value = self.format_movement(self.MOVEMENT)
template.get('pressure').value = self.format_pressure(self.PRESSURE)
pywikibot.showDiff(text, unicode(code))
if push:
self.wikipage.put(unicode(code), 'Bot: Updating hurricane infobox. Errors? [[User talk:Legoktm|report them!]]')
def main():
pg = pywikibot.Page(SITE, 'User:Legobot/Current hurricanes')
text = pg.get()
go=False
for line in text.splitlines():
if go:
split = line.split('||')
if len(split) == 2:
storm = Hurricane(split[0], pywikibot.Page(SITE, split[1]))
storm.update(push=True)
elif len(split) == 3:
storm1 = Hurricane(split[0], pywikibot.Page(SITE, split[2]))
if split[1]:
storm2 = Hurricane(split[1], pywikibot.Page(SITE, split[2]))
else:
storm1.update(push=True)
continue
storm1.update(push=False)
storm2.update(push=False)
if storm1.TIMESTAMP > storm2.TIMESTAMP:
print 'Updating storm1'
storm1.update(push=True)
else:
print 'Updating storm2'
storm2.update(push=True)
if '<pre>' in line:
go=True
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
u"""
Make canvasNode into static one.
Read input/output port into static attributes.
"""
import sys
import textwrap
import maya.api.OpenMaya as api
import maya.OpenMaya as oldapi
import maya.cmds as cmds
import FabricEngine.Core
import kraken.plugins.maya_plugin.conversion as conv
# ==================================================================================
MAYA_API_VERSION = oldapi.MGlobal.apiVersion()
__author__ = 'yamahigashi'
__version__ = '0.0.1'
# _TYPE_IDS = 0x001A0002
maya_useNewAPI = True
# ==================================================================================
class CanvasWrapper(api.MPxNode):
type_name = 'canvasWrapper'
# type_id = api.MTypeId(_TYPE_IDS)
canvasPath = r"D:\fabric\hogea.canvas"
@property
def dgNode(self):
'''for shortcut.'''
return api.MFnDependencyNode(self.thisMObject())
@property
def dfgExec(self):
'''for shortcut.'''
return self._canvasGraph.getExec()
@property
def dfgBinding(self):
'''for shortcut.'''
return self._canvasGraph
@property
def host(self):
'''for shortcut.'''
return self._client.DFG.host
def postConstructor(self):
self._restoredFromPersistenceData = False
self._dummyValue = 17
self._isTransferingInputs = False
# self._instances.push_back(this)
self._dgDirtyEnabled = True
self._portObjectsDestroyed = False
self._affectedPlugsDirty = True
self._affectedPlugs = api.MPlugArray()
self._outputsDirtied = False
self.isDeformer = False
# store internal state for evaluation
self._dirtyPlugs = []
self.spliceMayaDataOverride = []
self._client = getClient()
with open(self.canvasPath, 'r') as f:
self.canvasJson = f.read()
self._canvasGraph = self.host.createBindingFromJSON(self.canvasJson)
@classmethod
def initialize(cls):
fnTyped = api.MFnTypedAttribute()
fnNumeric = api.MFnNumericAttribute()
saveData = fnTyped.create("saveData", "svd", api.MFnData.kString)
fnTyped.hidden = True
cls.addAttribute(saveData)
evalId = fnNumeric.create("evalID", "evalID", api.MFnNumericData.kInt, 0)
fnNumeric.keyable = True
fnNumeric.hidden = True
fnNumeric.readable = True
fnNumeric.writable = True
fnNumeric.storable = False
fnNumeric.cached = False
cls.addAttribute(evalId)
# ------------------------------------------------------------------ #
for a in cls.ports:
attrName = a['name']
ioType = a.get('execPortType', None)
typeSpec = a.get('typeSpec', None)
if typeSpec and "mat44" in typeSpec.lower():
cls._addMat44Attr(attrName, ioType, typeSpec)
elif typeSpec and "scalar" in typeSpec.lower():
cls._addFloatAttr(attrName, ioType, typeSpec)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone0FK, cls.arm_R_defConstraint_klOp_constrainees)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone0FK, cls.arm_R_ikSolver_klOp_bone0Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone0FK, cls.arm_R_ikSolver_klOp_bone1Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone0FK, cls.arm_R_ikSolver_klOp_bone2Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone0FK, cls.arm_R_ikSolver_klOp_midJointOut)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone1FK, cls.arm_R_defConstraint_klOp_constrainees)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone1FK, cls.arm_R_ikSolver_klOp_bone0Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone1FK, cls.arm_R_ikSolver_klOp_bone1Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone1FK, cls.arm_R_ikSolver_klOp_bone2Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_bone1FK, cls.arm_R_ikSolver_klOp_midJointOut)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_ikHandle, cls.arm_R_defConstraint_klOp_constrainees)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_ikHandle, cls.arm_R_ikSolver_klOp_bone0Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_ikHandle, cls.arm_R_ikSolver_klOp_bone1Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_ikHandle, cls.arm_R_ikSolver_klOp_bone2Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_ikHandle, cls.arm_R_ikSolver_klOp_midJointOut)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_root, cls.arm_R_defConstraint_klOp_constrainees)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_root, cls.arm_R_ikSolver_klOp_bone0Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_root, cls.arm_R_ikSolver_klOp_bone1Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_root, cls.arm_R_ikSolver_klOp_bone2Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_root, cls.arm_R_ikSolver_klOp_midJointOut)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_upV, cls.arm_R_defConstraint_klOp_constrainees)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_upV, cls.arm_R_ikSolver_klOp_bone0Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_upV, cls.arm_R_ikSolver_klOp_bone1Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_upV, cls.arm_R_ikSolver_klOp_bone2Out)
cls.attributeAffects(cls.arm_R_ikSolver_klOp_upV, cls.arm_R_ikSolver_klOp_midJointOut)
@classmethod
def _addFloatAttr(cls, attrName, ioType, typeSpec):
fnNumeric = api.MFnNumericAttribute()
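        # The attribute name is only known at runtime, so the class attribute is created via exec().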
exec("""cls.{0} = fnNumeric.create("{0}", "{0}", api.MFnNumericData.kDouble)""".format(attrName))
if 'in' in ioType.lower():
fnNumeric.keyable = True
fnNumeric.readable = False
fnNumeric.writable = True
fnNumeric.storable = False
else:
fnNumeric.keyable = False
fnNumeric.readable = True
fnNumeric.writable = False
fnNumeric.storable = False
exec("""cls.addAttribute(cls.{})""".format(attrName))
@classmethod
def _addMat44Attr(cls, attrName, ioType, typeSpec):
matAttr = api.MFnMatrixAttribute()
exec("""cls.{0} = matAttr.create("{0}", "{0}", api.MFnMatrixAttribute.kDouble)""".format(attrName))
if 'in' in ioType.lower():
matAttr.storable = True
matAttr.keyable = True
matAttr.hidden = False
if "[]" in typeSpec:
matAttr.array = True
matAttr.readable = False
else:
matAttr.storable = True
matAttr.keyable = False
matAttr.hidden = False
if "[]" in typeSpec:
matAttr.array = True
matAttr.usesArrayDataBuilder = True
matAttr.writable = False
matAttr.readable = True
exec("""cls.addAttribute(cls.{})""".format(attrName))
def compute(self, plug, block):
"""
Args:
plug (MPlug): plug representing the attribute that needs to be recomputed.
block (MDataBlock): data block containing storage for the node's attributes.
"""
import time
print("compute {} begin {}".format(plug.name(), time.time()))
self._outputsDirtied = False
if self.dfgBinding.getErrors(True) != "[]":
sys.stderr.write("canvas got error(s) {}".format(len(self.dfgBinding.getErrors(True))))
sys.stderr.write("error: {}".format(self.dfgBinding.getErrors(True)))
return False
# FabricSplice::Logging::AutoTimer timer("Maya::compute()")
if self.transferInputValuesToSplice(plug, block):
self.evaluate()
self.transferOutputValuesToMaya(plug, block, self.isDeformer)
print("compute {} finish".format(plug.name()))
# ////////////////////////////////////////////////////////////////////////
def evaluate(self):
self.dfgBinding.execute()
def transferInputValuesToSplice(self, plug, data):
"""
Args:
data (MDataBlock):
"""
if self._isTransferingInputs:
return False
self._isTransferingInputs = True
for p in self.attributeAffectsPair:
src = p[0]
dst = p[1]
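            # Build a small dispatch snippet per (src, dst) pair; exec() is used because
            # the plug names are only known at runtime.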
code = textwrap.dedent("""
if "{dst}" in plug.name():
plug = self.dgNode.findPlug("{src}", False)
conv.dfgPlugToPort_mat44(plug, data, self.dfgBinding, "{src}")
""".format(src=src, dst=dst))
exec(code)
self._isTransferingInputs = False
return True
def transferOutputValuesToMaya(self, plug, data, isDeformer=False):
"""
Args:
data (MDataBlock):
isDeformer (bool):
"""
# print "transferOutputValuesToMaya 1: " + plug.name()
# if self._isTransferingInputs:
# return
for p in self.attributeAffectsPair:
dst = p[1]
code = textwrap.dedent("""
if "{0}" in plug.name():
plug = self.dgNode.findPlug("{0}", False)
conv.dfgPortToPlug_mat44(self.dfgBinding, "{0}", plug, data)
""".format(dst))
exec(code)
def affectChildPlugs(self, plug, affectedPlugs):
"""
Args:
plug (MPlug):
affectedPlugs (MPlugArray):
Returns:
affectedPlugs (MPlugArray):
"""
pass
def setupMayaAttributeAffects(self, portName, portMode, newAttribute):
'''
Args:
portName (str):
portMode (Port_Mode):
newAttribute (MObject):
'''
# FabricSplice::Logging::AutoTimer globalTimer("Maya::setupMayaAttributeAffects()");
# std::string localTimerName = (std::string("Maya::")+_spliceGraph.getName()+"::setupMayaAttributeAffects()").c_str();
# FabricSplice::Logging::AutoTimer localTimer(localTimerName.c_str());
# -----------------------------------------------------------------------------
def getClient():
"""Gets the Fabric client from the DCC. This ensures that the same client
    is used, instead of a new one being created each time one is required.
Returns:
Fabric Client.
"""
print("get fabric client")
contextID = cmds.fabricSplice('getClientContextID')
if not contextID:
cmds.fabricSplice('constructClient')
contextID = cmds.fabricSplice('getClientContextID')
options = {
'contextID': contextID,
'guarded': False
}
client = FabricEngine.Core.createClient(options)
print(client)
return client
|
|
import logging
import subprocess
import sys
import os
import shutil
import binascii
import time
from tkinter import Tk, messagebox, Frame, Label, Entry, Button, Listbox, Scrollbar, Spinbox, END, DISABLED, StringVar, VERTICAL, NORMAL
from providers.scriptedFileSearch import ScriptedFileSearch
from wrappers.cardScanWrapper import CardScanWrapper
from providers.soundProvider import SoundProvider
from providers.rfidScannerProvider import RFIDScannerProvider
from environment.environment import Environment
class Editor:
# Info Defaults
LOG_FILE = 'editor.log'
devices = []
vidPK = []
vidPATH = []
vidNAME = []
uuid = []
uuidFK = []
def __init__(self, master, soundGenerator, rfidScanner):
self.environment = Environment()
self.soundProvider = SoundProvider(soundGenerator)
self.configureScannerProvider(rfidScanner)
self.load()
frame = Frame(master)
frame.pack()
self.activeCardNumber = StringVar()
self.usbSpin = StringVar()
self.usbSpin.set(self.environment.Usb)
Label(frame, text='RFID Card').grid(row=0, column=0)
Label(frame, text='Video').grid(row=0, column=2)
self.ee = Entry(frame, textvariable=self.activeCardNumber,
state=DISABLED, disabledforeground='black')
self.ee.grid(row=1, column=0)
self.r = Button(frame, text='Read Card', command=self.startCardProcess)
self.r.grid(row=1, column=1)
self.box = Listbox(frame)
for entry in self.vidNAME:
self.box.insert(END, entry)
self.box.bind("<<ListboxSelect>>", self.newselection)
self.box.grid(row=1, rowspan=5, column=2, columnspan=2)
self.scroll = Scrollbar(self.box, orient=VERTICAL)
self.box.config(yscrollcommand=self.scroll.set)
self.scroll.config(command=self.box.yview)
Button(frame, text='Assign Kill Code',
command=self.createKiller).grid(row=2, column=0)
Label(frame, text='Source USB').grid(row=4, column=0)
self.spin = Spinbox(frame, values=self.devices)
self.spin.delete(0, END)
self.spin.insert(0, self.environment.Usb)
self.spin.grid(row=5, column=0)
self.status = Button(frame, text='Update Device Repository',
command=self.updateDevice, disabledforeground='blue')
self.status.grid(row=6, column=0)
Button(frame, text='Save', command=self.save).grid(row=6, column=2)
Button(frame, text='Quit', command=self.closeWithSavePrompt).grid(
row=6, column=3)
def configureScannerProvider(self, rfidScanner):
provider = RFIDScannerProvider(rfidScanner)
self.RFIDScannerProvider = provider.PN532(
int(self.environment.CHIP_SELECT_PIN),
int(self.environment.MASTER_OUTPUT_SLAVE_INPUT_PIN),
int(self.environment.MASTER_INPUT_SLAVE_OUTPUT_PIN),
int(self.environment.SERIAL_CLOCK_PIN))
def closeWithSavePrompt(self):
ans = messagebox.askquestion(
'Save And Quit', 'Would you like to save your changes?')
if ans == 'yes':
self.save()
sys.exit(0)
def startCardProcess(self):
# Disable button to prevent event stacking
self.r.config(state=DISABLED)
self.processCard()
self.r.config(state=NORMAL)
def processCard(self):
# Scans RFID cards and sets them to text box
try:
self.processCardUnchecked()
except Exception as e:
self.displayScanError(e)
def processCardUnchecked(self):
cardScan = CardScanWrapper(self.soundS, self.RFIDScannerProvider)
cardScan.runScan()
self.processResult(cardScan.getFormattedResult())
def processResult(self, scanResult):
if scanResult == None:
return
self.activeCardNumber.set(scanResult) # Populate text box
self.deselectActiveListboxItems()
self.linkCardWithListbox(scanResult)
def deselectActiveListboxItems(self):
# De-select any active items in listbox
self.box.selection_clear(0, END)
def linkCardWithListbox(self, scanResult):
index = self.verifyCard(scanResult)
if str(self.uuidFK[index]) == self.environment.KillCommand:
messagebox.showinfo(
'Kill Card', 'This card is currently assigned to kill the application.')
return
self.highlightItemInListbox(index)
def highlightItemInListbox(self, index):
try:
i = self.vidPK.index(self.uuidFK[index])
except:
messagebox.showinfo('Card Unassigned',
'Card is not currently assigned to a video')
else:
self.box.see(i)
self.box.selection_clear(0, END)
self.box.selection_set(i)
self.box.activate(i)
def verifyCard(self, uidt):
try:
uuidIndex = self.uuid.index(uidt)
except:
uuidIndex = self.addNewCard(uidt)
return uuidIndex
def addNewCard(self, uidt):
self.uuid.append(uidt)
self.uuidFK.append(-1)
newIndex = len(self.uuid) - 1
return newIndex
def displayScanError(self, e):
messagebox.showerror('Error Occurred', 'Error: ' + str(e))
logging.error('Scan Failed: ' + str(e))
def save(self):
toSaveList = self.makePairedList(
self.vidPK, self.vidNAME, self.vidPATH)
self.safeSaveToFile(self.environment.VideoList, toSaveList)
toSaveList = self.makePairedList(self.uuid, self.uuidFK)
self.safeSaveToFile(self.environment.UuidTable, toSaveList)
def makePairedList(self, *itemLists):
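        # Zips parallel columns into rows, e.g. ([1, 2], ['a', 'b']) -> [[1, 'a'], [2, 'b']].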
stop = len(itemLists)
subList = []
listAll = []
for listIndex in range(len(itemLists[0])):
del subList[:]
for indice in range(stop):
subList.append(itemLists[indice][listIndex])
listAll.append(list(subList))
return listAll
def safeSaveToFile(self, fileName, pairList):
try:
self.writePairedListToTempFile(fileName, pairList)
except Exception as e:
logging.error('Failed to create video list: ' + str(e))
else:
self.replaceOriginalFileWithItsTemp(fileName)
def replaceOriginalFileWithItsTemp(self, fileName):
try:
if os.path.isfile(fileName):
os.remove(fileName)
os.rename(fileName + '.temp', fileName)
except Exception as e:
logging.error('Failed to replace old video list: ' + str(e))
def writePairedListToTempFile(self, fileName, pairedList):
f = open(fileName + '.temp', 'w')
self.writePairedListGivenFile(f, pairedList)
f.close()
def writePairedListGivenFile(self, f, pairedList):
i = 0
while(i < len(pairedList) - 1):
self.writeSingleLineOfPairedListToOpenFile(f, pairedList, i)
f.write('\n')
i = i+1
self.writeSingleLineOfPairedListToOpenFile(f, pairedList, i)
def writeSingleLineOfPairedListToOpenFile(self, f, pairedList, itemIndex):
fLine = ""
for item in range(len(pairedList[itemIndex])):
fLine = fLine + str(pairedList[itemIndex][item]) + ','
f.write(fLine[:-1])
def updateDevice(self):
scan = self.safeScan()
if scan != None:
self.safeProcessScan(scan)
self.status.config(text='Update Device Repository', state=NORMAL)
self.status.update_idletasks()
def safeScan(self):
scan = None
try:
scan = self.runScannerWithNotification()
except Exception as e:
self.showScanErrorMessage(e)
return scan
def runScannerWithNotification(self):
self.status.config(text='Scanning...', state=DISABLED)
self.status.update_idletasks()
scan = ScriptedFileSearch(subprocess)
scan.scan("scanner.sh")
return scan
def showScanErrorMessage(self, e):
messagebox.showerror('Scan Failed', 'Scan error: ' + str(e))
logging.error(str(e))
def safeProcessScan(self, scan):
try:
self.processScan(scan)
except Exception as e:
self.showErrorMessage(e)
def showErrorMessage(self, e):
messagebox.showerror('Error', 'Error: ' + str(e))
logging.error(str(e))
def refreshListBox(self):
self.box.delete(0, END)
for entry in self.vidNAME:
self.box.insert(END, entry)
def processScan(self, scan):
# Check if a scan turned up any results
if self.scanIsEmpty(scan):
self.showAbortScanMessage()
return
self.verifyUSB()
self.processScanFiles(scan)
self.refreshListBox()
def showAbortScanMessage(self):
messagebox.showwarning(
'No Files Found', 'A scan failed to find any files.')
logging.warning('Empty Scan occurred when attempting a merge')
def scanIsEmpty(self, scan):
return len(scan.NAME) == 0
def verifyUSB(self):
if self.environment.Usb != self.spin.get():
            self.environment.Usb = self.spin.get()
self.environment.update()
def processScanFiles(self, scan):
i = 0
j = 0
newPK = []
newName = []
newPath = []
self.status.config(text='Reading Files...')
self.status.update_idletasks()
# Iterate through list
while i < len(scan.NAME):
# Verifiy File
try:
if scan.PATH[i].find(self.environment.Usb) >= 0:
# File resides on repository - update FK
try:
# Locate matching entry
fkk = self.vidNAME.index(scan.NAME[i])
except Exception as e:
# No matching entry
logging.info('New file found in repository: ' + str(e))
pass
else:
# Update FK on uuid table
for uu in self.uuidFK:
if uu == self.vidPK[fkk]:
uu = scan.PK[i]
# Store entry in new Tables
newPK.append(scan.PK[i])
newName.append(scan.NAME[i])
newPath.append(scan.PATH[i])
else:
# Video resides on updating device - check if file already copied
found = False
while j < len(scan.NAME):
if str(scan.NAME[i]) == str(scan.NAME[j]) and scan.PATH[j].find(self.environment.Usb) >= 0:
found = True
break
j = j + 1
if not found:
# Copy file and append
try:
# Get device name
device = scan.PATH[i].replace('/media/pi/', '')
device = device[0:device.find('/')]
# Copy
self.status.config(
text='Copying ' + scan.NAME[i] + '...')
self.status.update_idletasks()
shutil.copyfile(
scan.PATH[i], scan.PATH[i].replace(device, self.environment.Usb))
except Exception as e:
logging.error('Failed to copy' +
scan.NAME[i] + ': ' + str(e))
else:
# Add to new array
newPK.append(scan.PK[i])
newName.append(scan.NAME[i])
newPath.append(
scan.PATH[i].replace(device, self.environment.Usb))
except Exception as e:
logging.error(str(e))
i = i+1
del self.vidNAME[:]
del self.vidPATH[:]
del self.vidPK[:]
self.vidNAME = newName
self.vidPATH = newPath
self.vidPK = newPK
def newselection(self, event):
# Fires when a new item is selected in the listbox
selection = event.widget.curselection()
try:
txt = self.ee.get()
if txt == '':
return
i = self.uuid.index(txt)
self.uuidFK[i] = self.vidPK[selection[0]]
except Exception as e:
messagebox.showerror('Error During Set', 'Error: ' + str(e))
logging.error(str(e))
def createKiller(self):
try:
self.assignCurrentCardAsKiller()
self.box.selection_clear(0, END)
except Exception as e:
self.handleCardNotScannedError(e)
def assignCurrentCardAsKiller(self):
i = self.uuid.index(self.ee.get())
self.uuidFK[i] = int(self.environment.KillCommand)
def handleCardNotScannedError(self, e):
messagebox.showinfo(
'Card Not Scanned', 'Please scan a card to assign it a [Kill Application] code.' + str(e))
logging.error(str(e))
def load(self):
# Generate Log
logging.basicConfig(filename=self.LOG_FILE, level=logging.INFO)
# Load Sound file
self.soundProvider.init()
self.soundProvider.mixer.pre_init(44100, -16, 12, 512)
# pygame.init() IS this only for linux distribution?
self.soundS = self.soundProvider.mixer.Sound(self.environment.SCAN_SOUND)
self.soundS.set_volume(1)
# Create an instance of the PN532 class.
        self.RFIDScannerProvider.begin()
# Configure PN532 to communicate with MiFare cards.
self.RFIDScannerProvider.SAM_configuration()
self.loadFiles()
self.locateDevices()
self.loadDevice()
def loadFiles(self):
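        # VideoList rows are 'pk,name,path' and UuidTable rows are 'uuid,fk'
        # (the same column order used by save()).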
self.readCSV(self.environment.VideoList, (int, self.vidPK),
(str, self.vidNAME), (str, self.vidPATH))
self.readCSV(self.environment.UuidTable,
(str, self.uuid), (int, self.uuidFK))
def readCSV(self, fileName, *storageList):
try:
fileContents = self.splitFileContentsIntoLines(fileName)
except Exception as e:
logging.error('Failed to load video list: ' + str(e))
else:
self.processCSV(fileContents, storageList)
def splitFileContentsIntoLines(self, fileName):
f = open(fileName, 'r')
fileContents = f.read().splitlines()
f.close()
return fileContents
def processCSV(self, fileContents, storageList):
i = 0
while (i < len(fileContents)):
split = fileContents[i].split(',')
for item in range(len(storageList)):
if storageList[item][0] is int:
storageList[item][1].append(int(split[item]))
else:
storageList[item][1].append(split[item])
i = i+1
def locateDevices(self):
try:
scan = ScriptedFileSearch(subprocess)
scan.scan("scanner.sh")
except Exception as e:
logging.error('Device scan failed: ' + str(e))
else:
self.searchScanForDevices(scan)
def loadDevice(self):
if self.environment.Usb == '' and len(self.devices) == 0:
self.terminateWithNoDeviceFailureMessage()
else:
self.handleAnyDeviceSearchFailures()
def handleAnyDeviceSearchFailures(self):
if self.environment.Usb == '' or self.environment.Usb == '< Not Set >':
self.showNoDeviceSetWarning()
elif len(self.devices) == 0:
self.terminateWithCurrentDeviceNotFoundMsg()
elif self.environment.Usb not in self.devices:
self.showCurrentDeviceNotFoundWarning()
def showNoDeviceSetWarning(self):
messagebox.showwarning(
'No USB Set', 'Please select a USB as a source device and then perform a Scan and Update')
logging.warning('No USB device is set!')
def showCurrentDeviceNotFoundWarning(self):
messagebox.showwarning(
            'Missing USB Source', 'WARNING: The current USB repository was not found among the available devices.')
logging.warning(
'Current USB repository was not located in device scan!')
def terminateWithCurrentDeviceNotFoundMsg(self):
messagebox.showerror(
            'No Devices Detected', 'No devices were detected including the current USB repository.\nPlease inspect USB device, or contact help.')
logging.critical('Scanner detected no devices. Closing...')
sys.exit(1)
def terminateWithNoDeviceFailureMessage(self):
messagebox.showerror(
'Storage Failure', 'No USB devices could be found, this editor will now close.')
logging.critical(
'Failed to find any devices with any media. Closing...')
sys.exit(1)
def searchScanForDevices(self, scan):
if len(scan.NAME) == 0:
self.showEmptyScanError()
else:
self.pullDeviceNamesFromPath(scan)
self.ensureDeviceWasFound()
def pullDeviceNamesFromPath(self, scan):
for path in scan.PATH:
try:
subpath = path.replace('/media/pi/', '')
if subpath[0:subpath.find('/')] not in self.devices:
self.devices.append(subpath[0:subpath.find('/')])
except:
pass
def ensureDeviceWasFound(self):
if len(self.devices) == 0:
messagebox.showerror(
'Improper Storage', 'Media files should not be stored in /media/pi.\nPlease move files to subfolder, or a USB device.')
logging.error(
'User error: Files were stored on pi media root. Requested User Action...')
def showEmptyScanError(self):
messagebox.showerror(
'Scan Error', 'Initial scan detected no files. Open case and inspect USB, or perform a restart.')
logging.error('Scan failed to detect files. (Do none exist?)')
|
|
"""
test class for zigzi
"""
from PEInstrument import *
from keystone import *
from PEManager import *
from capstone.x86 import *
import os
import unittest
# code_mnemonic = "and"
# code_op_str = "dx, 0xffff"
code_mnemonic = "and"
code_op_str = "edx, 0x7fffffff"
code = code_mnemonic + " " + code_op_str
def instrument_test(instruction):
    try:
        # Initialize engine in X86-32bit mode
        ks = Ks(KS_ARCH_X86, KS_MODE_32)
        # Keystone assembles the textual instruction string directly; no hex round-trip is needed.
        encoding, count = ks.asm(code)
return encoding, count
except KsError as e:
print("ERROR: %s" % e)
return None, 0
class Tests(unittest.TestCase):
# default setting
_Image_Base_ = 0x400000
_Adjust_Size_ = 0x1000
def __init__(self, *args, **kwargs):
"""
        __init__ is executed once for every test case,
        so common per-test initialization can also be done here.
"""
super(Tests, self).__init__(*args, **kwargs)
def setUp(self):
"""
        setUp() runs before each test case, much like an init method,
        so put common per-test initialization here.
"""
print("THIS IS SETUP")
self.src_instrument = PEInstrument.from_filename(self.src_filename)
self.dst_instrument = PEInstrument.from_filename(self.dst_filename)
def tearDown(self):
"""
        tearDown() runs after each test case ends,
        so put common per-test cleanup here.
"""
print("THIS IS TEARDOWN")
@classmethod
def setUpClass(cls):
"""
        setUpClass() runs once when the test class is initialized,
        so put class-wide initialization here.
"""
print("THIS IS setUpClass")
path = os.getcwd()
cls.code_log = open(os.path.join(path, "tests", "codelog.log"), 'w')
cls.reloc_log = open(os.path.join(path, "tests", "reloclog.log"), 'w')
# cls.src_filename = os.path.join(path, "tests", "firefox.exe")
# cls.dst_filename = os.path.join(path, "tests", "firefox_test.exe")
cls.src_filename = os.path.join(path, "tests", "sample.exe")
cls.dst_filename = os.path.join(path, "tests", "sample_test.exe")
# cls.src_filename = os.path.join(path, "tests", "simple_echo_server.exe")
# cls.dst_filename = os.path.join(path, "tests", "simple_echo_server_test.exe")
src_instrument = PEInstrument.from_filename(cls.src_filename)
src_instrument.register_pre_indirect_branch(instrument_test)
src_instrument.do_instrument()
src_instrument.writefile(cls.dst_filename)
cls.instrumented_dict = src_instrument.get_instrumented_pos()
src_pe_manager = PEManager(cls.src_filename)
dst_pe_manager = PEManager(cls.dst_filename)
dst_data_section = dst_pe_manager.get_data_section()
src_data_section = src_pe_manager.get_data_section()
cls._Adjust_Size_ = dst_data_section.VirtualAddress \
- src_data_section.VirtualAddress
cls._Image_Base_ = src_pe_manager.PE.OPTIONAL_HEADER.ImageBase
@classmethod
def tearDownClass(cls):
"""
        tearDownClass() runs once when the test class is torn down,
        so put class-wide cleanup here.
"""
print("THIS IS tearDownClass")
def test_export_function(self):
test_fail_flag = False
log = ""
src_instrument = self.src_instrument
dst_instrument = self.dst_instrument
src_util = src_instrument.get_pe_manager()
dst_util = dst_instrument.get_pe_manager()
if not hasattr(src_util.PE, "DIRECTORY_ENTRY_EXPORT"):
print("THIS BINARY HAS NOT EXPORT.")
return True
src_export_entry = src_util.PE.DIRECTORY_ENTRY_EXPORT
dst_export_entry = dst_util.PE.DIRECTORY_ENTRY_EXPORT
src_export_entry_struct = src_export_entry.struct
dst_export_entry_struct = dst_export_entry.struct
src_fn_rva = []
dst_fn_rva = []
for index in range(len(src_export_entry.symbols)):
entry_fn_rva = src_export_entry_struct.AddressOfFunctions \
+ (index * 4)
fn_rva = src_util.PE.get_dword_at_rva(entry_fn_rva)
src_fn_rva.append(fn_rva)
for index in range(len(dst_export_entry.symbols)):
entry_fn_rva = dst_export_entry_struct.AddressOfFunctions \
+ (index * 4)
fn_rva = dst_util.PE.get_dword_at_rva(entry_fn_rva)
dst_fn_rva.append(fn_rva)
if len(src_fn_rva) != len(dst_fn_rva):
log += "Export function length is not matched\n"
test_fail_flag = True
for index in range(len(src_fn_rva)):
src_rva = src_fn_rva[index]
dst_rva = dst_fn_rva[index]
if not self.compare_bytes(src_rva, dst_rva, 4):
                test_fail_flag = True
log += "{:x} {:x}\n".format(src_rva, dst_rva)
        if test_fail_flag:
            self.fail(log)
def test_relocation(self):
test_fail_flag = False
src_instrument = self.src_instrument
dst_instrument = self.dst_instrument
src_manager = src_instrument.get_pe_manager()
dst_manager = dst_instrument.get_pe_manager()
src_relocation_dict = src_manager.get_relocation()
dst_relocation_dict = dst_manager.get_relocation()
src_execute_start, src_execute_end = \
src_manager.get_text_section_virtual_address_range()
dst_execute_start, dst_execute_end = \
dst_manager.get_text_section_virtual_address_range()
src_execute_start += self._Image_Base_
src_execute_end += self._Image_Base_
dst_execute_start += self._Image_Base_
dst_execute_end += self._Image_Base_
sorted_src_reloc_dict = sorted(src_relocation_dict.items(),
key=operator.itemgetter(0))
sorted_dst_reloc_dict = sorted(dst_relocation_dict.items(),
key=operator.itemgetter(0))
src_relocation_length = len(src_relocation_dict.keys())
dst_relocation_length = len(dst_relocation_dict.keys())
if src_relocation_length == dst_relocation_length:
print("RELOCATION DIRECTORY LENGTH IS SAME")
for index in range(len(sorted_src_reloc_dict)):
src_reloc_el = sorted_src_reloc_dict[index]
dst_reloc_el = sorted_dst_reloc_dict[index]
src_reloc_address = int(src_reloc_el[0])
src_reloc = src_reloc_el[1]
dst_reloc_address = int(dst_reloc_el[0])
dst_reloc = dst_reloc_el[1]
src_reloc_data = int(src_manager.PE.get_dword_at_rva(src_reloc_address))
dst_reloc_data = int(dst_manager.PE.get_dword_at_rva(dst_reloc_address))
self.reloc_log.write(
"[{:04x}]\t[0x{:x}][0x{:x}][{}]\t[0x{:x}][0x{:x}][{}]\n"
.format(index, src_reloc_address, src_reloc_data, src_reloc,
dst_reloc_address, dst_reloc_data, dst_reloc))
if src_execute_start < src_reloc_data < src_execute_end \
and dst_execute_start < dst_reloc_data < dst_execute_end:
dst_rva = dst_reloc_data - self._Image_Base_ - 0x1000
src_rva = src_reloc_data - self._Image_Base_ - 0x1000
instrumented_size = \
self.getInstrumentedSizeUntil(dst_rva,
self.instrumented_dict)
dst_rva -= instrumented_size
if dst_rva != src_rva:
self.reloc_log.write("\t[FAILED] ==> [0x{:x}]\t{:x}\t"
"expected [0x{:x}] but [0x{:x}]\n"
.format(src_rva, instrumented_size,
src_rva + instrumented_size,
dst_rva))
test_fail_flag = True
elif src_execute_end < src_reloc_data and dst_execute_end < dst_reloc_data:
if src_reloc_data + self._Adjust_Size_ != dst_reloc_data:
self.reloc_log.write("\t[FAILED] ==> [0x{:x}]\t[0x{:x}]\n"
.format(src_reloc_data, dst_reloc_data))
test_fail_flag = True
if test_fail_flag:
self.fail("RELOCATION ADJUST FAILED")
def test_codes(self):
src_instrument = self.src_instrument
dst_instrument = self.dst_instrument
src_disassemble = src_instrument.get_instructions()
dst_disassemble = dst_instrument.get_instructions()
execute_start, execute_end = \
src_instrument.get_pe_manager()\
.get_text_section_virtual_address_range()
src_size = execute_end - execute_start
execute_start, execute_end = \
dst_instrument.get_pe_manager()\
.get_text_section_virtual_address_range()
dst_size = execute_end - execute_start
dst_index = 0
src_index = 0
for index in range(len(dst_disassemble)):
try:
dst_inst_address, dst_inst = dst_disassemble[dst_index]
src_inst_address, src_inst = src_disassemble[src_index]
if dst_inst_address >= dst_size \
or src_inst_address >= src_size:
break
except:
self.fail("Something wrong when disassemble codes")
self.log_code(dst_inst, src_inst)
dst_str = self.inst_to_str(dst_inst)
src_str = self.inst_to_str(src_inst)
if dst_str == code:
dst_index += 1
continue
if dst_str != src_str:
if(dst_inst.mnemonic == src_inst.mnemonic
and len(dst_inst.operands) == len(src_inst.operands)):
if not(self.checkCompareInstruction(dst_inst, src_inst)):
find_match = False
for dst_search_depth in range(6):
if find_match:
break
for srcSearchDepth in range(6):
dst_search_addr, dst_dis_search = \
dst_disassemble[dst_index + dst_search_depth]
src_search_addr, src_dis_search = \
src_disassemble[src_index + srcSearchDepth]
if self.checkCompareInstruction(dst_dis_search,
src_dis_search):
self.log_code(dst_inst, src_inst)
for search_depth \
in range(dst_search_depth + 1):
addr, dst_dis_search = \
dst_disassemble[dst_index
+ search_depth]
self.log_code(dst_inst=dst_dis_search)
for search_depth in range(srcSearchDepth+1):
addr, src_dis_search = \
src_disassemble[
src_index + search_depth
]
self.log_code(src_inst=src_dis_search)
dst_index += dst_search_depth
src_index += srcSearchDepth
find_match = True
if find_match == False:
self.log_code(dst_inst, src_inst)
# assert False
"""
print("[TESTCODE]\t[0x{:x}]{:s}{:s}\t[0x{:x}]{:s}{:s}"
.format(dst_inst.address,
dst_inst.mnemonic, dst_inst.op_str,
src_dis.address,
src_dis.mnemonic, src_dis.op_str))
assert False
"""
else:
find_match = False
for dst_search_depth in range(6):
if find_match:
break
for srcSearchDepth in range(6):
dst_search_addr, dst_dis_search = \
dst_disassemble[dst_index + dst_search_depth]
src_search_addr, src_dis_search = \
src_disassemble[src_index + srcSearchDepth]
if self.checkCompareInstruction(dst_dis_search,
src_dis_search):
print("[DIFF MNEMONIC] ====================")
for search_depth in range(dst_search_depth + 1):
addr, dst_dis_search = \
dst_disassemble[dst_index
+ search_depth]
self.log_code(dst_inst=dst_dis_search)
for search_depth in range(srcSearchDepth + 1):
addr, src_dis_search = \
src_disassemble[src_index + search_depth]
self.log_code(src_inst=src_dis_search)
dst_index += dst_search_depth
src_index += srcSearchDepth
find_match = True
if not find_match:
self.log_code(dst_inst, src_inst)
dst_index += 1
src_index += 1
def log_code(self, dst_inst=None, src_inst=None, prev_str=None,
next_str=None):
if prev_str is not None:
self.code_log.write(prev_str + "\n")
if src_inst is None and dst_inst is not None:
self.code_log.write("[DESTINY] ==> [0x{:04x}]\t{:35s}\n"
.format(dst_inst.address,
self.inst_to_str(dst_inst))
)
elif src_inst is not None and dst_inst is None:
self.code_log.write("\t\t\t\t\t\t\t\t\t\t\t\t"
"[SOURCE] ==> [0x{:04x}]\t{:35s}\n"
.format(src_inst.address,
self.inst_to_str(src_inst))
)
else:
self.code_log.write("[0x{:04x}]\t{:35s}\t[0x{:04x}]\t{:35s}\n"
.format(dst_inst.address,
self.inst_to_str(dst_inst),
src_inst.address,
self.inst_to_str(src_inst))
)
if next_str is not None:
self.code_log.write(next_str + "\n")
def checkDirectJmp(self, dst_inst, src_inst):
result = False
src_jmp_target = src_inst.operands[0].imm
dst_jmp_target = dst_inst.operands[0].imm
if(dst_jmp_target - src_jmp_target
== self.getInstrumentedSizeUntil(dst_jmp_target,
self.instrumented_dict)
):
result = True
return result
def checkIndirectJmp(self, dst_inst, src_inst):
return self.checkCompareOperands(dst_inst.operands, src_inst.operands)
def checkCompareOperands(self, dst_operands, src_operands):
result = False
if len(dst_operands) == len(src_operands):
for index in range(len(dst_operands)):
dst_operand = dst_operands[index]
src_operand = src_operands[index]
if dst_operand.type == X86_OP_REG \
and src_operand.type == X86_OP_REG:
if dst_operand.reg == src_operand.reg:
result = True
elif dst_operand.type == X86_OP_IMM \
and src_operand.type == X86_OP_IMM:
if dst_operand.imm == src_operand.imm \
or ((dst_operand.imm - src_operand.imm)
== self._Adjust_Size_):
result = True
elif ((dst_operand.imm - src_operand.imm)
== self.getInstrumentedSizeUntil(dst_operand.imm
- 0x401000,
self.instrumented_dict)
):
result = True
else:
result = False
elif dst_operand.type == X86_OP_MEM \
and src_operand.type == X86_OP_MEM:
if dst_operand.mem.segment != 0:
if dst_operand.mem.segment != src_operand.mem.segment:
return False
if dst_operand.mem.base != 0:
if dst_operand.mem.base != src_operand.mem.base:
return False
if dst_operand.mem.index != 0:
if dst_operand.mem.index != src_operand.mem.index:
return False
if dst_operand.mem.scale != 1:
if not (dst_operand.mem.scale == src_operand.mem.scale):
return False
if dst_operand.mem.disp != 0:
if not (dst_operand.mem.disp == src_operand.mem.disp):
if not (dst_operand.mem.disp - src_operand.mem.disp
== self._Adjust_Size_):
return False
result = True
else:
result = False
return result
def checkCompareInstruction(self, dst_inst, src_inst):
if dst_inst.mnemonic == src_inst.mnemonic \
and dst_inst.op_str == src_inst.op_str:
result = True
elif dst_inst.groups == src_inst.groups:
if self.dst_instrument.disassembler.is_relative_branch(dst_inst):
result = self.checkDirectJmp(dst_inst, src_inst)
elif self.dst_instrument.disassembler.is_indirect_branch(dst_inst):
result = self.checkIndirectJmp(dst_inst, src_inst)
else:
result = self.checkCompareOperands(dst_inst.operands,
src_inst.operands)
else:
result = False
return result
@staticmethod
def getInstrumentedSizeUntil(rva, instrument_dict):
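        # Sum the sizes of all instrumented chunks located at addresses below the given RVA.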
sorted_instrumented_dict = sorted(instrument_dict.items(),
key=operator.itemgetter(0))
instrumented_size = 0
for address, size in sorted_instrumented_dict:
if address < rva:
instrumented_size += size
else:
break
return instrumented_size
@staticmethod
def inst_to_str(instruction):
return instruction.mnemonic + " " + instruction.op_str
def compare_bytes(self, src_rva, dst_rva, length):
src_data = \
self.src_instrument.code_manager.get_data_from_rva(src_rva, length)
dst_data = \
self.dst_instrument.code_manager.get_data_from_rva(dst_rva, length)
if src_data == dst_data:
return True
return False
def setUpModule():
"""
    This function runs before any test class instance is created.
"""
print("THIS IS setUpModule")
def tearDownModule():
"""
    This function runs after all test cases have finished.
"""
print("THIS IS tearDownModule")
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import
import re
import json
import hashlib
from uuid import UUID, uuid4
from ast import literal_eval
from time import sleep
from subprocess import Popen, PIPE
from logging import getLogger
from six import string_types
from celery import states
from celery.app.control import flatten_reply
from que import IMPORTANT, E_SHUTDOWN, Q_MGMT, TT_EXEC, TT_MGMT, TT_INTERNAL, TG_DC_BOUND, TT, TG
from que.erigonesd import cq
from que.lock import TaskLock
from que.user_tasks import UserTasks
from que.exceptions import PingFailed, NodeError
# Defaults
LOGTASK = cq.conf.ERIGONES_LOGTASK
TASK_USER = cq.conf.ERIGONES_TASK_USER
DEFAULT_DC = cq.conf.ERIGONES_DEFAULT_DC
DEFAULT_TASK_PREFIX = [None, TT_EXEC, '1', TG_DC_BOUND, DEFAULT_DC]
RE_TASK_PREFIX = re.compile(r'([a-zA-Z]+)')
DEFAULT_FILE_READ_SIZE = 102400
logger = getLogger(__name__)
def is_valid_task_id(task_id):
"""
Return False if task ID is not valid.
"""
parts = task_id.split('-')
if len(parts) == 5 and [len(i) for i in parts[1:]] == [8, 4, 4, 4]:
tp = RE_TASK_PREFIX.split(parts[0])
return (len(tp) == 5 and
all(i.isdigit() for i in tp[::2]) and
tp[1] in TT and
tp[3] in TG)
return False
def task_id_from_string(user_id, owner_id=None, dummy=False, tt=TT_EXEC, tg=TG_DC_BOUND,
dc_id=DEFAULT_DC, task_prefix=None):
"""
Generate new task ID with prepended user ID.
"""
if task_prefix is None:
if owner_id is None:
owner_id = user_id
user_id = str(user_id)
task_prefix = user_id + tt + str(owner_id) + tg + str(dc_id)
x = str(uuid4())
if dummy:
return task_prefix + '-' + hashlib.md5(user_id).hexdigest()[-8:] + x[8:-13]
return task_prefix + '-' + x[:-13]
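# Illustrative sketch of the resulting ID layout (assuming TT_EXEC == 'e' and
# TG_DC_BOUND == 'd'; the actual letters come from the que constants):
#   task_id_from_string(1, dc_id='1')  ->  '1e1d1-xxxxxxxx-xxxx-xxxx-xxxx'
# i.e. '<user><tt><owner><tg><dc>' followed by '-' and the first 23 characters of a uuid4.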
def task_prefix_from_task_id(task_id):
"""
Get (user ID, task type, owner ID) tuple from task ID.
"""
tp = RE_TASK_PREFIX.split(task_id[:-24])
return tuple(tp + DEFAULT_TASK_PREFIX[len(tp):])
def user_owner_ids_from_task_id(task_id):
"""
Get user ID and owner ID from task ID.
"""
task_prefix = task_prefix_from_task_id(task_id)
return task_prefix[0], task_prefix[2]
def user_owner_dc_ids_from_task_id(task_id):
"""
Get user ID, owner ID and DC ID from task ID.
"""
task_prefix = task_prefix_from_task_id(task_id)
return task_prefix[0], task_prefix[2], task_prefix[4]
def user_id_from_task_id(task_id):
"""
Get user ID from task ID.
"""
return user_owner_ids_from_task_id(task_id)[0]
def tt_from_task_id(task_id):
"""
Get task type from task ID.
"""
return task_prefix_from_task_id(task_id)[1]
def owner_id_from_task_id(task_id):
"""
Get owner ID from task ID.
"""
return task_prefix_from_task_id(task_id)[2]
def tg_from_task_id(task_id):
"""
Get TG from task ID.
"""
return task_prefix_from_task_id(task_id)[3]
def dc_id_from_task_id(task_id):
"""
Get Datacenter ID from task ID.
"""
return task_prefix_from_task_id(task_id)[4]
def task_id_from_request(request, owner_id=None, dummy=False, tt=TT_EXEC, tg=TG_DC_BOUND, dc_id=DEFAULT_DC):
"""
Create new task ID from request.
"""
if isinstance(request, string_types) or isinstance(request, int):
return task_id_from_string(request, owner_id=owner_id, dummy=dummy, tt=tt, tg=tg, dc_id=dc_id)
return task_id_from_string(request.user.id, owner_id=owner_id, dummy=dummy, tt=tt, tg=tg, dc_id=request.dc.id)
def task_id_from_task_id(task_id, user_id=None, owner_id=None, tt=None, tg=None, dc_id=None, keep_task_suffix=False):
"""
Create new task ID with task_prefix taken from existing task ID.
"""
if user_id or owner_id or tt or tg or dc_id:
task_prefix = list(task_prefix_from_task_id(task_id))
if user_id:
task_prefix[0] = str(user_id)
if tt:
task_prefix[1] = tt
if owner_id:
task_prefix[2] = str(owner_id)
if tg:
task_prefix[3] = tg
if dc_id:
task_prefix[4] = str(dc_id)
task_prefix = ''.join(task_prefix)
else:
task_prefix = task_id[:-24]
if keep_task_suffix:
return task_prefix + task_id[-24:]
return task_id_from_string(None, task_prefix=task_prefix)
def generate_internal_task_id():
"""Generate internal task ID"""
return task_id_from_string(TASK_USER, tt=TT_INTERNAL)
def is_dummy_task(task_id):
"""
Check if task ID is a real celery task ID.
"""
# noinspection PyBroadException
try:
user_id = user_id_from_task_id(task_id)
return task_id.split('-')[1] == hashlib.md5(user_id).hexdigest()[-8:]
except Exception:
pass
return False
def is_mgmt_task(task_id):
"""
Return True if task has task type == 'm'
"""
return tt_from_task_id(task_id) == TT_MGMT
def is_task_dc_bound(task_id):
"""
Get dc boundness from task ID.
"""
return tg_from_task_id(task_id) == TG_DC_BOUND
def _get_ar(ar_or_tid):
"""
Return AsyncResult.
"""
if isinstance(ar_or_tid, cq.AsyncResult):
return ar_or_tid
else:
return cq.AsyncResult(ar_or_tid)
def get_result(ar_or_tid):
"""
Return result (dict) of AsyncResult.
:returns: AsyncResult.result
:rtype: dict
"""
ar = _get_ar(ar_or_tid)
# noinspection PyBroadException
try:
result = ar.result
except Exception:
return 'Unknown error'
if type(result).__name__.endswith('TaskException'):
# because celery recreates the class TaskException instead of que.tasks.TaskException
# noinspection PyBroadException
try:
result = literal_eval(result.args[0])
except Exception:
return 'Unknown failure'
return result
def follow_callback(ar):
"""
Check if callback exists and follow it.
"""
ar = _get_ar(ar)
# noinspection PyBroadException
try:
ar = follow_callback(get_result(ar)['meta']['callback'])
except Exception:
pass
return ar
def get_callback(task_id):
"""
Check if task has a callback.
"""
# noinspection PyBroadException
try:
return get_result(task_id)['meta']['callback']
except Exception:
return False
def is_callback(task_id):
"""
Check if task is a callback => has a caller.
"""
# noinspection PyBroadException
try:
return get_result(task_id)['meta']['caller']
except Exception:
return False
def is_logtask(task_id):
"""
Check if task is a config.ERIGONES_LOGTASK.
"""
# noinspection PyBroadException
try:
return get_result(task_id)['meta']['cb_name'] == LOGTASK
except Exception:
return False
def send_task_forever(sender, task, delay=3, nolog=False, **kwargs):
"""
Try to run task forever.
http://docs.celeryproject.org/en/latest/reference/celery.html#celery.Celery.send_task
"""
ping_check = True
expires = kwargs.get('expires', None)
queue = kwargs.get('queue', None)
max_retries = 1
num_retries = 0
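    # Retry loop: tasks that can expire are retried at most `max_retries` times and then
    # the original exception is re-raised; tasks without an expiry are retried forever,
    # and the ping check is skipped once the worker is shutting down.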
while True:
try:
if queue and ping_check:
if not ping(queue):
raise PingFailed('Task queue "%s" worker is not responding!' % queue)
t = cq.send_task(task, **kwargs)
except Exception as ex:
logger.warning('Sending task "%s" by %s failed. Error: %s', task, sender, ex)
if expires:
if num_retries < max_retries:
num_retries += 1
logger.warning('Task "%s" sent by %s can expire. Immediate retry attempt %d/%d.', task, sender,
num_retries, max_retries)
sleep(1)
else:
logger.error('Task "%s" sent by %s can expire. Failing after %d retries.', task, sender,
num_retries)
raise ex
else:
num_retries += 1
worker_shutting_down = E_SHUTDOWN.is_set()
if worker_shutting_down and num_retries > max_retries: # We are shutting down and we already tried once
ping_check = False # Just try to send the task without ping check
logger.warning('Task "%s" sent by %s must run! Retrying (%d) without ping check...',
task, sender, num_retries)
sleep(1)
else:
logger.warning('Task "%s" sent by %s must run! Retrying (%d) in %s seconds...',
task, sender, num_retries, delay)
sleep(delay)
else:
if nolog:
logger.debug('Task "%s" with id %s was created by %s', task, t.id, sender)
else:
logger.log(IMPORTANT, 'Task "%s" with id %s was created by %s', task, t.id, sender)
return t
def cancel_task(task_id, terminate=False, signal=None, force=False):
"""
Revoke task.
"""
# Callbacks must run; they should never expire (unless you are using force)
if not force and (get_callback(task_id) or is_callback(task_id)):
# Parent task has finished
return False
# Don't forget that this triggers also signal
return cq.control.revoke(task_id, terminate=terminate, signal=signal)
def log_task_callback(task_id, task_status=states.REVOKED, cleanup=True, detail='revoked', sender_name='???',
send_forever=True):
"""
Mark task status with task_status and create log callback task. USE with caution!
"""
user_id, owner_id = user_owner_ids_from_task_id(task_id)
# Internal user task
if owner_id == TASK_USER:
logger.debug('Task %s[%s] %s in log_task_callback :: Internal task - skipping', sender_name, task_id, detail)
return None
# If a lock created by this task still exists we need to remove it now - Bug #chili-592
if cleanup:
lock_key = TaskLock.get_lock_key_from_value(task_id)
if lock_key:
TaskLock(lock_key, desc='LogTask %s' % task_id).delete(check_value=task_id, fail_silently=True)
else:
logger.warning('Task %s[%s] %s in log_task_callback :: Reverse lock does not exist',
sender_name, task_id, detail)
# Task info from cache
task_info = UserTasks(owner_id).get(task_id)
if task_info is None:
logger.critical('Task %s[%s] %s in log_task_callback :: Task not found in UserTasks',
sender_name, task_id, detail)
return None
# Kwargs for logtask
task_info['task_status'] = task_status
task_info['cleanup'] = cleanup
# Create log task on mgmt
result = {
'detail': detail,
'meta': {
'cb_name': LOGTASK,
'msg': task_info.get('msg', ''),
'apiview': task_info.get('apiview', {})
}
}
task_params = {
'args': (result, task_id),
'kwargs': task_info,
'queue': Q_MGMT,
'expires': None, # This is callback -> never expire
'task_id': task_id_from_task_id(task_id),
}
if send_forever:
t = send_task_forever(task_id, LOGTASK, **task_params)
else:
t = cq.send_task(LOGTASK, **task_params)
if t:
logger.info('Task %s[%s] %s in log_task_callback :: Created logtask %s', sender_name, task_id, detail, t.id)
else:
logger.error('Task %s[%s] %s in log_task_callback :: Failed to create logtask', sender_name, task_id, detail)
return t
def delete_task(task_id, force=False):
"""
Delete task from UserTasks. Only for tasks which started, but failed to finish and are stuck in DB.
"""
if force:
logger.warning('Trying to delete task %s by using force.', task_id)
logger.warning('Forcing task deletion results in undefined behavior.')
else:
return None, 'Safe task deletion is not implemented'
# The task has a callback, which probably means that it has already finished on compute node.
callback = get_callback(task_id)
if callback:
logger.warning('Task has a callback: %s', callback)
logger.warning('Going to delete task %s!', task_id)
# Revoke before proceeding (should not do anything, but won't harm)
cancel_task(task_id, force=force)
# So, a task with STARTED state, but is not running and is not a callback (or did not start a callback).
# In order to delete the task we need to simulate task revoking and create a callback log task for doing a proper
# cleanup. The log callback will then remove the task from UserTasks.
try:
t = log_task_callback(task_id, detail='vanished', send_forever=False)
except Exception as ex:
return None, str(ex)
if t:
return t.id, None
else:
return None, 'Unknown error'
def queue_to_hostnames(queue):
"""
Return worker hostnames according to queue name.
"""
if queue == Q_MGMT:
return cq.conf.ERIGONES_MGMT_WORKERS
# noinspection PyRedundantParentheses
return (queue.replace('.', '@', 1),)
def ping(queue, timeout=True, count=1, retry_wait=0.5):
"""
Ping erigonesd worker(s) according to queue and return list of alive workers.
"""
pong = []
i = 0
workers = queue_to_hostnames(queue)
if isinstance(timeout, bool) and timeout:
timeout = cq.conf.ERIGONES_PING_TIMEOUT
while not pong and i < count:
i += 1
if i > 1:
sleep(retry_wait)
try:
res = cq.control.ping(destination=workers, timeout=timeout)
except Exception as ex:
logger.warning('Could not ping task queue "%s" workers: %s error: %s', queue, workers, ex)
else:
if res:
for answer in res:
                    worker, status = list(answer.items())[0]
if status == {'ok': 'pong'}:
pong.append(worker)
else:
logger.warning('Ping [%d] of queue "%s" workers "%s" failed (%s)', i, queue, worker, res)
else:
logger.warning('Ping [%d] of all queue "%s" workers: %s failed (%s)', i, queue, workers, res)
return pong
def worker_command(command, destination, **kwargs):
"""
Synchronous node (celery panel) command.
"""
kwargs['destination'] = [destination]
kwargs['reply'] = True
reply = flatten_reply(cq.control.broadcast(command, **kwargs))
try:
return reply[destination]
except (KeyError, TypeError):
return None
def validate_uuid(value):
"""
Validate UUID string.
Raises ValueError or returns the uuid string if valid.
"""
return str(UUID(value)) # Will raise ValueError in case of an invalid uuid
def fetch_node_uuid():
"""
Retrieve node UUID from sysinfo command output.
"""
proc = Popen(['sysinfo'], bufsize=0, close_fds=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
try:
return validate_uuid(json.loads(stdout)['UUID'])
except Exception as exc:
raise NodeError('Could not fetch node UUID: %s' % exc)
def read_file(fp, limit=DEFAULT_FILE_READ_SIZE):
"""
    Return fp.read() output, limited to the last `limit` bytes of the file.
"""
fp.seek(0, 2) # Go to EOF
total = fp.tell()
if total > limit:
fp.seek(total - limit)
else:
fp.seek(0)
return fp.read()
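# Minimal usage sketch (hypothetical file path): return roughly the last 100 KiB of a log.
#
#     with open('/var/log/erigonesd.log') as fp:  # hypothetical path
#         tail = read_file(fp)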
|
|
"""
Perceptron
w = w + N * (d(k) - y) * x(k)
Using a perceptron network for oil analysis: by measuring 3 parameters
that represent chemical characteristics we can classify the oil as p1 or p2
p1 = -1
p2 = 1
"""
import random
class Perceptron:
def __init__(
self,
sample: list[list[float]],
target: list[int],
learning_rate: float = 0.01,
epoch_number: int = 1000,
bias: float = -1,
) -> None:
"""
Initializes a Perceptron network for oil analysis
:param sample: sample dataset of 3 parameters with shape [30,3]
:param target: variable for classification with two possible states -1 or 1
:param learning_rate: learning rate used in optimizing.
:param epoch_number: number of epochs to train network on.
:param bias: bias value for the network.
>>> p = Perceptron([], (0, 1, 2))
Traceback (most recent call last):
...
ValueError: Sample data can not be empty
>>> p = Perceptron(([0], 1, 2), [])
Traceback (most recent call last):
...
ValueError: Target data can not be empty
>>> p = Perceptron(([0], 1, 2), (0, 1))
Traceback (most recent call last):
...
ValueError: Sample data and Target data do not have matching lengths
"""
self.sample = sample
if len(self.sample) == 0:
raise ValueError("Sample data can not be empty")
self.target = target
if len(self.target) == 0:
raise ValueError("Target data can not be empty")
if len(self.sample) != len(self.target):
raise ValueError("Sample data and Target data do not have matching lengths")
self.learning_rate = learning_rate
self.epoch_number = epoch_number
self.bias = bias
self.number_sample = len(sample)
self.col_sample = len(sample[0]) # number of columns in dataset
self.weight: list = []
def training(self) -> None:
"""
        Trains the perceptron by iterating over the samples until none are misclassified
:return: None
>>> data = [[2.0149, 0.6192, 10.9263]]
>>> targets = [-1]
>>> perceptron = Perceptron(data,targets)
>>> perceptron.training() # doctest: +ELLIPSIS
('\\nEpoch:\\n', ...)
...
"""
for sample in self.sample:
sample.insert(0, self.bias)
for i in range(self.col_sample):
self.weight.append(random.random())
self.weight.insert(0, self.bias)
epoch_count = 0
while True:
has_misclassified = False
for i in range(self.number_sample):
u = 0
for j in range(self.col_sample + 1):
u = u + self.weight[j] * self.sample[i][j]
y = self.sign(u)
if y != self.target[i]:
for j in range(self.col_sample + 1):
self.weight[j] = (
self.weight[j]
+ self.learning_rate
* (self.target[i] - y)
* self.sample[i][j]
)
has_misclassified = True
# print('Epoch: \n',epoch_count)
epoch_count = epoch_count + 1
# if you want control the epoch or just by error
if not has_misclassified:
print(("\nEpoch:\n", epoch_count))
print("------------------------\n")
# if epoch_count > self.epoch_number or not error:
break
def sort(self, sample: list[float]) -> None:
"""
:param sample: example row to classify as P1 or P2
:return: None
>>> data = [[2.0149, 0.6192, 10.9263]]
>>> targets = [-1]
>>> perceptron = Perceptron(data,targets)
>>> perceptron.training() # doctest: +ELLIPSIS
('\\nEpoch:\\n', ...)
...
>>> perceptron.sort([-0.6508, 0.1097, 4.0009]) # doctest: +ELLIPSIS
('Sample: ', ...)
classification: P...
"""
if len(self.sample) == 0:
raise ValueError("Sample data can not be empty")
sample.insert(0, self.bias)
u = 0
for i in range(self.col_sample + 1):
u = u + self.weight[i] * sample[i]
y = self.sign(u)
if y == -1:
print(("Sample: ", sample))
print("classification: P1")
else:
print(("Sample: ", sample))
print("classification: P2")
def sign(self, u: float) -> int:
"""
threshold function for classification
:param u: input number
        :return: 1 if the input is greater than or equal to 0, otherwise -1
>>> data = [[0],[-0.5],[0.5]]
>>> targets = [1,-1,1]
>>> perceptron = Perceptron(data,targets)
>>> perceptron.sign(0)
1
>>> perceptron.sign(-0.5)
-1
>>> perceptron.sign(0.5)
1
"""
return 1 if u >= 0 else -1
samples = [
[-0.6508, 0.1097, 4.0009],
[-1.4492, 0.8896, 4.4005],
[2.0850, 0.6876, 12.0710],
[0.2626, 1.1476, 7.7985],
[0.6418, 1.0234, 7.0427],
[0.2569, 0.6730, 8.3265],
[1.1155, 0.6043, 7.4446],
[0.0914, 0.3399, 7.0677],
[0.0121, 0.5256, 4.6316],
[-0.0429, 0.4660, 5.4323],
[0.4340, 0.6870, 8.2287],
[0.2735, 1.0287, 7.1934],
[0.4839, 0.4851, 7.4850],
[0.4089, -0.1267, 5.5019],
[1.4391, 0.1614, 8.5843],
[-0.9115, -0.1973, 2.1962],
[0.3654, 1.0475, 7.4858],
[0.2144, 0.7515, 7.1699],
[0.2013, 1.0014, 6.5489],
[0.6483, 0.2183, 5.8991],
[-0.1147, 0.2242, 7.2435],
[-0.7970, 0.8795, 3.8762],
[-1.0625, 0.6366, 2.4707],
[0.5307, 0.1285, 5.6883],
[-1.2200, 0.7777, 1.7252],
[0.3957, 0.1076, 5.6623],
[-0.1013, 0.5989, 7.1812],
[2.4482, 0.9455, 11.2095],
[2.0149, 0.6192, 10.9263],
[0.2012, 0.2611, 5.4631],
]
exit = [
-1,
-1,
-1,
1,
1,
-1,
1,
-1,
1,
1,
-1,
1,
-1,
-1,
-1,
-1,
1,
1,
1,
1,
-1,
1,
1,
1,
1,
-1,
-1,
1,
-1,
1,
]
if __name__ == "__main__":
import doctest
doctest.testmod()
network = Perceptron(
sample=samples, target=exit, learning_rate=0.01, epoch_number=1000, bias=-1
)
network.training()
print("Finished training perceptron")
print("Enter values to predict or q to exit")
    while True:
        sample: list = []
        quit_requested = False
        for i in range(len(samples[0])):
            user_input = input("value: ").strip()
            if user_input == "q":
                quit_requested = True
                break
            observation = float(user_input)
            sample.insert(i, observation)
        if quit_requested:
            break
        network.sort(sample)
|
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""
This module contains the Dispatcher class.
"""
import logging
from functools import wraps
from inspect import getargspec
from threading import Thread, BoundedSemaphore, Lock
from re import match
from telegram import (TelegramError, Update, NullHandler)
H = NullHandler()
logging.getLogger(__name__).addHandler(H)
semaphore = None
running_async = 0
async_lock = Lock()
def run_async(func):
"""
Function decorator that will run the function in a new thread. A function
decorated with this will have to include **kwargs in their parameter list,
which will contain all optional parameters.
Args:
func (function): The function to run in the thread.
Returns:
function:
"""
@wraps(func)
def pooled(*pargs, **kwargs):
"""
A wrapper to run a thread in a thread pool
"""
global running_async, async_lock
result = func(*pargs, **kwargs)
semaphore.release()
with async_lock:
running_async -= 1
return result
@wraps(func)
def async_func(*pargs, **kwargs):
"""
A wrapper to run a function in a thread
"""
global running_async, async_lock
thread = Thread(target=pooled, args=pargs, kwargs=kwargs)
semaphore.acquire()
with async_lock:
running_async += 1
thread.start()
return thread
return async_func
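# Illustrative usage sketch (not part of the original module): a handler
# decorated with run_async must accept **kwargs, since the dispatcher passes
# every optional argument through to it. The decorated call returns the
# started Thread object.
#
#     @run_async
#     def heavy_handler(bot, update, **kwargs):
#         do_expensive_work(update)  # hypothetical helper
#
# At most `workers` such handlers run concurrently, bounded by the module
# level BoundedSemaphore created in Dispatcher.__init__.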
class Dispatcher:
"""
This class dispatches all kinds of updates to its registered handlers.
A handler is a function that usually takes the following parameters
bot:
The telegram.Bot instance that received the message
update:
The update that should be handled by the handler
Error handlers take an additional parameter
error:
The TelegramError instance that was raised during processing the
update
    All handlers, except error handlers, can also request more information by
    adding one or more of the following arguments to their argument list for
    convenience
update_queue:
The Queue instance which contains all new updates and is
processed by the Dispatcher. Be careful with this - you might
create an infinite loop.
args:
        If the update is an instance of str or telegram.Update, this will be
a list that contains the content of the message split on spaces,
except the first word (usually the command).
Example: '/add item1 item2 item3' -> ['item1', 'item2', 'item3']
For other updates, args will be None
For regex-based handlers, you can also request information about the match.
For all other handlers, these will be None
groups:
A tuple that contains the result of
re.match(matcher, ...).groups()
groupdict:
A dictionary that contains the result of
re.match(matcher, ...).groupdict()
Args:
bot (telegram.Bot): The bot object that should be passed to the
handlers
update_queue (queue.Queue): The synchronized queue that will
contain the updates.
"""
def __init__(self, bot, update_queue, workers=4):
self.bot = bot
self.update_queue = update_queue
self.telegram_message_handlers = []
self.telegram_command_handlers = {}
self.telegram_regex_handlers = {}
self.string_regex_handlers = {}
self.string_command_handlers = {}
self.type_handlers = {}
self.unknown_telegram_command_handlers = []
self.unknown_string_command_handlers = []
self.error_handlers = []
self.logger = logging.getLogger(__name__)
self.running = False
self.__lock = Lock()
global semaphore
if not semaphore:
semaphore = BoundedSemaphore(value=workers)
else:
self.logger.info("Semaphore already initialized, skipping.")
class _Stop(object):
"""
A class which objects can be passed into the update queue to stop the
thread
"""
pass
def start(self):
"""
Thread target of thread 'dispatcher'. Runs in background and processes
the update queue.
"""
self.__lock.acquire()
if not self.running:
self.running = True
self.__lock.release()
self.logger.info('Dispatcher thread started')
while True:
update = None
try:
# Pop update from update queue.
# Blocks if no updates are available.
update = self.update_queue.get()
if type(update) is self._Stop:
break
self.processUpdate(update)
self.logger.debug('Processed Update: %s' % update)
# Dispatch any errors
except TelegramError as te:
self.logger.warn("Error was raised while processing "
"Update.")
self.dispatchError(update, te)
# All other errors should not stop the thread, just print them
except:
self.logger.exception("An uncaught error was raised while "
"processing an update")
else:
self.__lock.release()
self.logger.info('Dispatcher thread stopped')
def stop(self):
"""
Stops the thread
"""
with self.__lock:
if self.running:
self.running = False
self.update_queue.put(self._Stop())
def processUpdate(self, update):
"""
Processes a single update.
Args:
update (any):
"""
handled = False
# Custom type handlers
for t in self.type_handlers:
if isinstance(update, t):
self.dispatchType(update)
handled = True
# string update
if type(update) is str and update.startswith('/'):
self.dispatchStringCommand(update)
handled = True
elif type(update) is str:
self.dispatchRegex(update)
handled = True
# An error happened while polling
if isinstance(update, TelegramError):
self.dispatchError(None, update)
handled = True
# Telegram update (regex)
if isinstance(update, Update):
self.dispatchRegex(update)
handled = True
# Telegram update (command)
if isinstance(update, Update) \
and update.message.text.startswith('/'):
self.dispatchTelegramCommand(update)
handled = True
# Telegram update (message)
elif isinstance(update, Update):
self.dispatchTelegramMessage(update)
handled = True
# Update not recognized
if not handled:
self.dispatchError(update, TelegramError(
"Received update of unknown type %s" % type(update)))
# Add Handlers
def addTelegramMessageHandler(self, handler):
"""
Registers a message handler in the Dispatcher.
Args:
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
self.telegram_message_handlers.append(handler)
def addTelegramCommandHandler(self, command, handler):
"""
Registers a command handler in the Dispatcher.
Args:
command (str): The command keyword that this handler should be
listening to.
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
if command not in self.telegram_command_handlers:
self.telegram_command_handlers[command] = []
self.telegram_command_handlers[command].append(handler)
def addTelegramRegexHandler(self, matcher, handler):
"""
        Registers a regex handler in the Dispatcher. The handlers will be
        called if re.match(matcher, update.message.text) returns a match.
Args:
matcher (str/__Regex): A regex string or compiled regex object that
matches on messages that handler should be listening to
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
if matcher not in self.telegram_regex_handlers:
self.telegram_regex_handlers[matcher] = []
self.telegram_regex_handlers[matcher].append(handler)
def addStringCommandHandler(self, command, handler):
"""
Registers a string-command handler in the Dispatcher.
Args:
command (str): The command keyword that this handler should be
listening to.
handler (function): A function that takes (Bot, str, *args) as
arguments.
"""
if command not in self.string_command_handlers:
self.string_command_handlers[command] = []
self.string_command_handlers[command].append(handler)
def addStringRegexHandler(self, matcher, handler):
"""
        Registers a regex handler in the Dispatcher. The handlers will be
        called if re.match(matcher, string) returns a match.
Args:
matcher (str/__Regex): A regex string or compiled regex object that
matches on the string input that handler should be listening to
            handler (function): A function that takes (Bot, str, *args) as
arguments.
"""
if matcher not in self.string_regex_handlers:
self.string_regex_handlers[matcher] = []
self.string_regex_handlers[matcher].append(handler)
def addUnknownTelegramCommandHandler(self, handler):
"""
Registers a command handler in the Dispatcher, that will receive all
commands that have no associated handler.
Args:
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
self.unknown_telegram_command_handlers.append(handler)
def addUnknownStringCommandHandler(self, handler):
"""
Registers a string-command handler in the Dispatcher, that will
receive all commands that have no associated handler.
Args:
handler (function): A function that takes (Bot, str, *args) as
arguments.
"""
self.unknown_string_command_handlers.append(handler)
def addErrorHandler(self, handler):
"""
Registers an error handler in the Dispatcher.
Args:
handler (function): A function that takes (Bot, TelegramError) as
arguments.
"""
self.error_handlers.append(handler)
def addTypeHandler(self, the_type, handler):
"""
Registers a type handler in the Dispatcher. This allows you to send
any type of object into the update queue.
Args:
the_type (type): The type this handler should listen to
handler (function): A function that takes (Bot, type, *args) as
arguments.
"""
if the_type not in self.type_handlers:
self.type_handlers[the_type] = []
self.type_handlers[the_type].append(handler)
# Remove Handlers
def removeTelegramMessageHandler(self, handler):
"""
De-registers a message handler.
Args:
handler (any):
"""
if handler in self.telegram_message_handlers:
self.telegram_message_handlers.remove(handler)
def removeTelegramCommandHandler(self, command, handler):
"""
De-registers a command handler.
Args:
command (str): The command
handler (any):
"""
if command in self.telegram_command_handlers \
and handler in self.telegram_command_handlers[command]:
self.telegram_command_handlers[command].remove(handler)
def removeTelegramRegexHandler(self, matcher, handler):
"""
De-registers a regex handler.
Args:
matcher (str/__Regex): The regex matcher object or string
handler (any):
"""
if matcher in self.telegram_regex_handlers \
and handler in self.telegram_regex_handlers[matcher]:
self.telegram_regex_handlers[matcher].remove(handler)
def removeStringCommandHandler(self, command, handler):
"""
De-registers a string-command handler.
Args:
command (str): The command
handler (any):
"""
if command in self.string_command_handlers \
and handler in self.string_command_handlers[command]:
self.string_command_handlers[command].remove(handler)
def removeStringRegexHandler(self, matcher, handler):
"""
De-registers a regex handler.
Args:
matcher (str/__Regex): The regex matcher object or string
handler (any):
"""
if matcher in self.string_regex_handlers \
and handler in self.string_regex_handlers[matcher]:
self.string_regex_handlers[matcher].remove(handler)
def removeUnknownTelegramCommandHandler(self, handler):
"""
De-registers an unknown-command handler.
Args:
handler (any):
"""
if handler in self.unknown_telegram_command_handlers:
self.unknown_telegram_command_handlers.remove(handler)
def removeUnknownStringCommandHandler(self, handler):
"""
De-registers an unknown-command handler.
Args:
handler (any):
"""
if handler in self.unknown_string_command_handlers:
self.unknown_string_command_handlers.remove(handler)
def removeErrorHandler(self, handler):
"""
De-registers an error handler.
Args:
handler (any):
"""
if handler in self.error_handlers:
self.error_handlers.remove(handler)
def removeTypeHandler(self, the_type, handler):
"""
De-registers a type handler.
Args:
handler (any):
"""
if the_type in self.type_handlers \
and handler in self.type_handlers[the_type]:
self.type_handlers[the_type].remove(handler)
def dispatchTelegramCommand(self, update):
"""
Dispatches an update that contains a command.
        Args:
            update (telegram.Update): The Telegram update that contains the
                command
"""
command = update.message.text.split(' ')[0][1:].split('@')[0]
if command in self.telegram_command_handlers:
self.dispatchTo(self.telegram_command_handlers[command], update)
else:
self.dispatchTo(self.unknown_telegram_command_handlers, update)
def dispatchRegex(self, update):
"""
Dispatches an update to all string or telegram regex handlers that
match the string/message content.
Args:
update (str, Update): The update that should be checked for matches
"""
if isinstance(update, Update):
handlers = self.telegram_regex_handlers
to_match = update.message.text
elif isinstance(update, str):
handlers = self.string_regex_handlers
to_match = update
for matcher in handlers:
m = match(matcher, to_match)
if m:
for handler in handlers[matcher]:
self.call_handler(handler,
update,
groups=m.groups(),
groupdict=m.groupdict())
def dispatchStringCommand(self, update):
"""
Dispatches a string-update that contains a command.
Args:
update (str): The string input
"""
command = update.split(' ')[0][1:]
if command in self.string_command_handlers:
self.dispatchTo(self.string_command_handlers[command], update)
else:
self.dispatchTo(self.unknown_string_command_handlers, update)
def dispatchType(self, update):
"""
Dispatches an update of any type.
Args:
update (any): The update
"""
for t in self.type_handlers:
if isinstance(update, t):
self.dispatchTo(self.type_handlers[t], update)
def dispatchTelegramMessage(self, update):
"""
Dispatches an update that contains a regular message.
Args:
update (telegram.Update): The Telegram update that contains the
message.
"""
self.dispatchTo(self.telegram_message_handlers, update)
def dispatchError(self, update, error):
"""
Dispatches an error.
Args:
            update (any): The update that caused the error
error (telegram.TelegramError): The Telegram error that was raised.
"""
for handler in self.error_handlers:
handler(self.bot, update, error)
def dispatchTo(self, handlers, update, **kwargs):
"""
Dispatches an update to a list of handlers.
Args:
handlers (list): A list of handler-functions.
update (any): The update to be dispatched
"""
for handler in handlers:
self.call_handler(handler, update, **kwargs)
def call_handler(self, handler, update, **kwargs):
"""
Calls an update handler. Checks the handler for keyword arguments and
fills them, if possible.
Args:
handler (function): An update handler function
update (any): An update
"""
target_kwargs = {}
fargs = getargspec(handler).args
'''
        async handlers will receive all optional arguments, since we can't
        inspect their argument list.
'''
is_async = 'pargs' == getargspec(handler).varargs
if is_async or 'update_queue' in fargs:
target_kwargs['update_queue'] = self.update_queue
if is_async or 'args' in fargs:
if isinstance(update, Update):
args = update.message.text.split(' ')[1:]
elif isinstance(update, str):
args = update.split(' ')[1:]
else:
args = None
target_kwargs['args'] = args
if is_async or 'groups' in fargs:
target_kwargs['groups'] = kwargs.get('groups', None)
if is_async or 'groupdict' in fargs:
target_kwargs['groupdict'] = kwargs.get('groupdict', None)
handler(self.bot, update, **target_kwargs)
|
|
# Authors: Daichi Yoshikawa <daichi.yoshikawa@gmail.com>
# License: BSD 3 clause
import operator
from collections.abc import Iterable
from functools import reduce
import numpy as np
import cupy as cp
def prod(x):
if isinstance(x, Iterable):
return reduce(operator.mul, x, 1)
else:
return x
def asnumpy(x):
    if isinstance(x, np.ndarray):
return x
else:
return cp.asnumpy(x)
def shuffle_data(x, y):
"""Shuffle descriptive features and target features.
The 2 matrices must have the same number of rows.
If not, AttributeError is thrown.
Arguments
---------
x : np.array
Descriptive features in 2d array
whose shape is (num of data, num of feature).
y : np.array
Target features in 2d array
whose shape is (num of data, num of feature).
Returns
-------
np.array, np.array
        Randomly row-shuffled x and y arrays.
"""
if x.shape[0] != y.shape[0]:
msg1 = 'x rows : ' + str(x.shape[0]) + '\n'
msg2 = 'y rows : ' + str(y.shape[0]) + '\n'
msg = 'x and y data size are different.\n' + msg1 + msg2
raise AttributeError(msg)
index = np.arange(x.shape[0])
np.random.shuffle(index)
return x[index], y[index]
def split_data(x, y, test_data_ratio):
"""Split one dataset which consists of
descriptive features and target features into 2 datasets,
that is training data and test data.
The number of x's row must be the same as the one of y's row.
Arguments
---------
x : np.array
Descriptive features in 2d array
whose shape is (num of data, num of feature).
y : np.array
Target features in 2d array
whose shape is (num of data, num of feature).
test_data_ratio : float
Desired ratio of test data in range from 0.0 to 1.0.
If 0.3, 30% data is for test data and
rest of the data is for training data.
Returns
-------
np.array, np.array, np.array, np.array
The former 2 arrays are descriptive features
and target features of training data.
The latter 2 arrays are descriptive features
and target features of test data.
"""
training_data_num = x.shape[0]
if (test_data_ratio > 0.) and (test_data_ratio < 1.):
training_data_num = int(training_data_num * (1. - test_data_ratio))
x_train = x[:training_data_num, :]
y_train = y[:training_data_num, :]
x_test = x[training_data_num:, :]
y_test = y[training_data_num:, :]
return x_train, y_train, x_test, y_test
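# Usage sketch (illustrative shapes, not part of the original module):
#
#     x = np.random.rand(100, 5)   # 100 samples, 5 descriptive features
#     y = np.random.rand(100, 2)   # 100 samples, 2 target features
#     x, y = shuffle_data(x, y)
#     x_train, y_train, x_test, y_test = split_data(x, y, test_data_ratio=0.25)
#     # -> 75 training rows and 25 test rows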
def gaussian_normalization(x, ep=1e-5):
"""Normalize 2d matrix to have mean ==0 and standard deviation == 0
w.r.t each feature.
This function directly modifies the argument x.
Arguments
---------
x : np.array
Features in 2d array whose shape is (num of data, num of feature)
ep : float
        Used to avoid division by zero.
"""
x -= x.mean(axis=0).T
x /= np.sqrt(x.var(axis=0).T) + ep
def scale_normalization(x, ep=1e-5):
"""Normalize 2d matrix to have values' range from 0.0 to 1.0
w.r.t each feature.
This function directly modifies the argument x.
Arguments
---------
x : np.array
Features in 2d array whose shape is (num of data, num of feature)
ep : float
        Used to avoid division by zero.
"""
x -= x.min(axis=0).T
amp = x.max(axis=0) - x.min(axis=0)
amp = amp.reshape(amp.size, -1)
x /= (amp.T + ep)
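# Sketch of the two in-place normalizations (illustrative data):
#
#     x = np.array([[1.0, 10.0], [3.0, 30.0]])
#     gaussian_normalization(x)   # each column now has mean ~0 and std ~1
#
#     x = np.array([[1.0, 10.0], [3.0, 30.0]])
#     scale_normalization(x)      # each column now spans roughly [0, 1]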
def w2im(w, shape, layout):
"""Reshape 2d weight matrix to 2d image matrix
which represents well aligned filters.
This is utilized to visualize weight matrix by matplotlib.pyplot.
Arguments
---------
w : np.array
Weight matrix in 2d array.
shape : tuple (rows, cols)
Shape of filter. In the case of multi-channel, filters are
taken as single channel by taking average over channels.
layout : tuple (rows, cols)
Number of filter to display
in direction of rows and cols respectively.
Returns
-------
np.array
Well-aligned weight matrix in 2d array.
"""
if (w.shape[0] - 1) != prod(shape):
        msg = ('(w.shape[0] - 1) != prod(shape)\n'
               ' w.shape[0] : {}\n'
               ' prod(shape) : {}'
               .format(w.shape[0], prod(shape)))
raise RuntimeError(msg)
if w.shape[1] < prod(layout):
        msg = ('w.shape[1] < prod(layout)\n'
               ' w.shape[1] : {}\n'
               ' prod(layout) : {}'
               .format(w.shape[1], prod(layout)))
raise RuntimeError(msg)
img = w[1:, :prod(layout)].T
img = img.reshape(layout[0], layout[1], shape[0], shape[1])
rows, cols = layout[0]*shape[0], layout[1]*shape[1]
img = img.transpose(0, 2, 1, 3).reshape(rows, cols)
return img
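# Shape sketch for w2im (illustrative numbers): with 3x3 filters arranged in a
# 2x4 layout, w must have shape (1 + 3*3, >= 2*4) = (10, >= 8); the first row
# (bias terms) is dropped and the first 8 columns are tiled into a
# (2*3, 4*3) = (6, 12) image that can be shown with matplotlib.pyplot.imshow.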
def is_multi_channels_image(shape):
"""Returns true when shape is (channels, rows, cols).
Convolutional Neural Network(CNN) and
fully connected neural network(NN) require different shape of input.
Tuple (channels, rows, cols) and a scalar value.
If it detects invalid shape, raise RuntimeError.
Arguments
---------
shape : int or tuple
input_shape/output_shape of layer.
If int, it represents number of neurons of layer.
If tuple, it represents shape of input image in format of
(channels, rows, cols).
Returns
-------
bool
If true, shape is in format of (channels, rows, cols).
"""
if isinstance(shape, tuple):
if len(shape) == 3:
return True
else:
msg = ('Shape must be int or tuple (channels, rows, cols).\n'
' shape : {}'.format(str(shape)))
raise RuntimeError(msg)
return False
def flatten(m, im_shape):
"""Flatten matrix in shape of (batches, channels, rows, cols)
to (batches, -1).
This function has no side effect, that is, it doesn't modify
argument "m" directly.
Arguments
---------
m : np.array
4D matrix in shape of (batches, channels, rows, cols).
im_shape : tuple
Shape of image, which is supposed to be (channels, rows, cols).
Returns
-------
np.array
Flattened matrix in 2d array.
"""
batches = m.shape[0]
chs, rows, cols = im_shape
return m.reshape(batches, chs*rows*cols)
def unflatten(m, im_shape):
"""Revert flattened matrix(batches, -1) to unflattened matrix
(batches, channels, rows, cols).
This function has no side effect, that is, it doesn't modify
argument "m" directly.
Arguments
---------
m : np.array
2D matrix in shape of (batches, -1).
im_shape : tuple
Shape of image, which is supposed to be (channels, rows, cols).
Returns
-------
np.array
Unflattened matrix in 4d array.
"""
batches = m.shape[0]
chs, rows, cols = im_shape
return m.reshape(batches, chs, rows, cols)
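# Shape sketch for flatten/unflatten (illustrative): for a batch of 32 RGB
# 28x28 images,
#
#     m = np.zeros((32, 3, 28, 28))
#     flat = flatten(m, (3, 28, 28))        # -> shape (32, 2352)
#     back = unflatten(flat, (3, 28, 28))   # -> shape (32, 3, 28, 28)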
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""unit tests for gcs_ocn_bq_ingest"""
import re
import time
import unittest
from typing import Dict, Optional
from unittest.mock import Mock
import pytest
from google.cloud import bigquery
from google.cloud import storage
import gcs_ocn_bq_ingest.common.constants
import gcs_ocn_bq_ingest.common.utils
import gcs_ocn_bq_ingest.main
COMPILED_DEFAULT_DESTINATION_REGEX = re.compile(
    gcs_ocn_bq_ingest.common.constants.DEFAULT_DESTINATION_REGEX)
@pytest.mark.parametrize(
"test_input,expected",
[
(
"dataset/table/_SUCCESS", # flat
{
"dataset": "dataset",
"table": "table",
"partition": None,
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": None
}),
(
"dataset/table/$20201030/_SUCCESS", # partitioned
{
"dataset": "dataset",
"table": "table",
"partition": "$20201030",
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": None
}),
(
"dataset/table/$20201030/batch_id/_SUCCESS", # partitioned, batched
{
"dataset": "dataset",
"table": "table",
"partition": "$20201030",
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": "batch_id"
}),
(
"dataset/table/batch_id/_SUCCESS", # batched (no partitioning)
{
"dataset": "dataset",
"table": "table",
"partition": None,
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": "batch_id"
}),
("dataset/table/2020/01/02/03/batch_id/_SUCCESS", {
"dataset": "dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "03",
"batch": "batch_id"
}),
("project.dataset/table/2020/01/02/03/batch_id/_SUCCESS", {
"dataset": "project.dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "03",
"batch": "batch_id"
}),
("project.dataset/table/historical/2020/01/02/03/batch_id/_SUCCESS", {
"dataset": "project.dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "03",
"batch": "batch_id"
}),
("project.dataset/table/incremental/2020/01/02/04/batch_id/_SUCCESS", {
"dataset": "project.dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "04",
"batch": "batch_id"
}),
])
def test_default_destination_regex(test_input: str,
expected: Dict[str, Optional[str]]):
"""ensure our default regex handles each scenarios we document.
this test is to support improving this regex in the future w/o regressing
for existing use cases.
"""
    match = COMPILED_DEFAULT_DESTINATION_REGEX.match(test_input)
if match:
assert match.groupdict() == expected
else:
        raise AssertionError(f"{COMPILED_DEFAULT_DESTINATION_REGEX}"
                             f" did not match test case {test_input}.")
@pytest.mark.parametrize("test_input,expected", [
([], []),
([[]], []),
([["foo"], ["bar", "baz"]], ["foo", "bar", "baz"]),
([["foo"], []], ["foo"]),
([["foo"], [], ["bar", "baz"]], ["foo", "bar", "baz"]),
])
def test_flatten2dlist(test_input, expected):
assert gcs_ocn_bq_ingest.common.utils.flatten2dlist(test_input) == expected
@pytest.mark.parametrize(
"original, update, expected",
[
# yapf: disable
( # empty original
{}, {
"a": 1
}, {
"a": 1
}),
( # empty update
{
"a": 1
}, {}, {
"a": 1
}),
( # basic update of top-level key
{
"a": 1
}, {
"a": 2
}, {
"a": 2
}),
( # update of list
{
"a": [1]
}, {
"a": [2]
}, {
"a": [2]
}),
( # update of nested key
{
"a": {
"b": 1
}
}, {
"a": {
"b": 2
}
}, {
"a": {
"b": 2
}
}),
( # don't drop keys that only appear in original
{
"a": {
"b": 1,
"c": 2
},
"d": 3
}, {
"a": {
"b": 4
},
}, {
"a": {
"b": 4,
"c": 2
},
"d": 3
}),
# yapf: enable
])
def test_recursive_update(original, update, expected):
assert gcs_ocn_bq_ingest.common.utils.recursive_update(original,
update) == expected
def test_triage_event(mock_env, mocker):
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/00/_SUCCESS")
apply_mock = mocker.patch('gcs_ocn_bq_ingest.common.utils.apply')
bq_mock = Mock()
bq_mock.project = "foo"
gcs_ocn_bq_ingest.main.triage_event(None, bq_mock, test_event_blob,
time.monotonic())
apply_mock.assert_called_once()
def test_triage_event_ordered(ordered_mock_env, mocker):
enforce_ordering = True
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/00/_SUCCESS")
apply_mock = mocker.patch('gcs_ocn_bq_ingest.common.utils.apply')
publisher_mock = mocker.patch(
'gcs_ocn_bq_ingest.common.ordering.backlog_publisher')
bq_mock = Mock()
bq_mock.project = "foo"
gcs_ocn_bq_ingest.main.triage_event(None,
bq_mock,
test_event_blob,
time.monotonic(),
enforce_ordering=enforce_ordering)
publisher_mock.assert_called_once()
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/_BACKFILL")
subscriber_mock = mocker.patch(
'gcs_ocn_bq_ingest.common.ordering.backlog_subscriber')
gcs_ocn_bq_ingest.main.triage_event(None,
None,
test_event_blob,
time.monotonic(),
enforce_ordering=enforce_ordering)
subscriber_mock.assert_called_once()
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/_backlog/00/_SUCCESS")
monitor_mock = mocker.patch(
'gcs_ocn_bq_ingest.common.ordering.subscriber_monitor')
gcs_ocn_bq_ingest.main.triage_event(None,
None,
test_event_blob,
time.monotonic(),
enforce_ordering=enforce_ordering)
monitor_mock.assert_called_once()
apply_mock.assert_not_called()
def test_create_job_id():
job_id = gcs_ocn_bq_ingest.common.utils.create_job_id(
"bucket/source/dataset/table/2021/06/22/01/_SUCCESS")
assert job_id.split('_SUCCESS')[
0] == 'gcf-ingest-bucket-source-dataset-table-2021-06-22-01-'
def test_create_job_id_with_datasource_name_and_partition():
table = bigquery.Table.from_string("project.dataset.table$2021062201")
job_id = gcs_ocn_bq_ingest.common.utils.create_job_id(
"bucket/source/dataset/table/2021/06/22/01/_SUCCESS", "source", table)
job_id = '-'.join(job_id.split('-')[0:9])
assert job_id == 'gcf-ingest-source-dataset-table-2021-06-22-01'
def test_create_job_id_with_datasource_name_and_partition_missing_hour():
table = bigquery.Table.from_string("project.dataset.table$20210622")
job_id = gcs_ocn_bq_ingest.common.utils.create_job_id(
"bucket/source/dataset/table/2021/06/22/_SUCCESS", "source", table)
job_id = '-'.join(job_id.split('-')[0:8])
assert job_id == 'gcf-ingest-source-dataset-table-2021-06-22'
def test_create_job_id_with_datasource_name_and_no_partition():
table = bigquery.Table.from_string("project.dataset.table")
job_id = gcs_ocn_bq_ingest.common.utils.create_job_id(
"bucket/source/dataset/table/_SUCCESS", "source", table)
job_id = '-'.join(job_id.split('-')[0:5])
assert job_id == 'gcf-ingest-source-dataset-table'
def test_compact_source_uris_with_wildcards():
long_uris = [
"gs://bucket/batch/file1.csv", "gs://bucket/batch/file2.csv",
"gs://bucket/batch/file3.csv"
]
source_uris = gcs_ocn_bq_ingest.common.utils.compact_source_uris_with_wildcards(
long_uris)
assert source_uris == ["gs://bucket/batch/*.csv"]
def test_compact_source_uris_with_wildcards_no_file_extension():
long_uris_no_extension = [
"gs://bucket/batch/file1", "gs://bucket/batch/file2",
"gs://bucket/batch/file3", "gs://bucket/batch/file4.csv"
]
source_uris = gcs_ocn_bq_ingest.common.utils.compact_source_uris_with_wildcards(
long_uris_no_extension)
unittest.TestCase().assertCountEqual(source_uris, [
"gs://bucket/batch/file1", "gs://bucket/batch/file2",
"gs://bucket/batch/file3", "gs://bucket/batch/*.csv"
])
|
|
# -*- coding: utf-8 -*-
import hashlib
import libtorrent as lt
import logging
from threading import Timer, Event
import os
import time
from p2c.exceptions import SessionNotBindedException, TorrentHasNotMetadataYet
import settings
from torrent.movie import Movie
SOURCE_TYPES = ("MAGNET", "TORRENT")
logger = logging.getLogger(__name__)
class Torrent(object):
def __init__(self, source_type, source, name):
"""
:type source: str magnet or torrent file path
:type name: str
:type source_type: str
"""
        if source_type not in SOURCE_TYPES:
raise Exception(
"source_type must be one of {0}".format(SOURCE_TYPES))
self.name = name
self.source_type = source_type
self.source = source
self.torrent_handler = None
self._torrent_info = None
# dict where key is path and value is Movie instance
# this is files which are downloading or downloaded
self.files = None
# piece_length in this torrent
self.piece_length = None
# amount of pieces which made up DOWNLOAD_PIECE_SIZE
self._jump = None
# if first prioritizing task was run once
self._prioritized = False
self.priority_interval = settings.PRIORITY_INTERVAL
self._priority_thread_stop = Event()
self._priority_timer = None
# currently downloading Movie
self._downloading = None
def __del__(self):
self._stop_torrent_threads()
def __str__(self):
return self.name
def set_source(self, source, session):
self.source = source
if self.source:
self.bind_session(session)
def bind_session(self, session):
"""
Creates torrent handler based on source_type
"""
add_data = {}
if self.source_type == "TORRENT":
add_data['ti'] = lt.torrent_info(self.source)
elif self.source_type == "MAGNET":
add_data['url'] = self.source
add_data['save_path'] = self._get_download_dir()
add_data['storage_mode'] = lt.storage_mode_t(1)
self.torrent_handler = session.add_torrent(add_data)
self._prioritize_to_none()
def get_filelist(self):
info = self.get_torrent_info(wait=True)
return [file.path for file in info.files()]
def get_movies_filelist(self):
if self.files is None:
self._create_movies()
return list(self.files.keys())
def get_movies(self):
if self.files is None:
self._create_movies()
return list(self.files.values())
def download_file(self, filename:str):
if not filename in self.get_movies_filelist():
raise Exception("filename not found in torrent")
self._prioritize_to_none()
self._downloading = self.files[filename]
self._run_torrent_threads()
def pause_download(self):
self._stop_torrent_threads()
self.torrent_handler.pause()
self._downloading = None
def has_torrent_info(self):
"""
Checks if torrent has downloaded metadata
"""
try:
self.get_torrent_info()
return True
except (TorrentHasNotMetadataYet, SessionNotBindedException):
return False
def get_torrent_info(self, wait=False):
"""
Gets torrent's metadata
"""
        if self._torrent_info is not None:
return self._torrent_info
if self.torrent_handler is None:
if wait:
                while self.torrent_handler is None:
time.sleep(0.1)
else:
raise SessionNotBindedException
if not self.torrent_handler.has_metadata():
if wait:
while not self.torrent_handler.has_metadata():
time.sleep(0.1)
else:
raise TorrentHasNotMetadataYet
self._torrent_info = self.torrent_handler.get_torrent_info()
return self._torrent_info
def get_status(self):
"""
Gets torrent's status with field like download rate, peers number,
state and progress level
"""
status = self.torrent_handler.status()
state_str = ['queued', 'checking', 'downloading metadata',
'downloading', 'finished', 'seeding', 'allocating',
'checking fastresume']
data = {
'download_rate': status.download_rate,
'download_payload_rate': status.download_payload_rate,
'num_peers': status.num_peers,
'state': state_str[status.state],
'progress': status.progress
}
return data
def get_seconds_to_buffer(self):
rate = self.get_status()['download_rate']
        if rate > 100 * 1024:
# round to 100 kbs, 200 kbs, 300 kbs
rate = int(rate / (100 * 1024)) * 100 * 1024
movie = self.get_downloading_movie()
# minimum rate
if movie and rate > 30 * 1024:
return int(movie.pieces_to_play * movie.piece_length / rate)
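    # Worked example (illustrative numbers): at a 250 KB/s download rate the
    # rate is rounded down to 200 KB/s; with pieces_to_play == 100 and a
    # piece_length of 1 MiB the estimate is int(100 * 1048576 / (200 * 1024))
    # == 512 seconds of buffering.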
def get_downloading_movie(self):
return self._downloading
def _create_movies(self):
info = self.get_torrent_info()
files = info.files()
self.piece_length = info.piece_length()
self.priority_interval = settings.PRIORITY_INTERVAL * self.piece_length / (
1024 ** 2)
self._jump = int(settings.DOWNLOAD_PIECE_SIZE / self.piece_length) + 1
self.files = {}
for file in files:
ext = os.path.splitext(file.path)[1]
if ext and ext[1:].lower() in settings.SUPPORTED_MOVIE_EXTENSIONS:
first_piece = int(file.offset / self.piece_length)
last_piece = int((file.size + file.offset) / self.piece_length)
self.files[file.path] = Movie(path=file.path,
size=file.size, first_piece=first_piece,
last_piece=last_piece,
piece_length=self.piece_length,
download_dir=self._get_download_dir())
def _update_movies_progress(self):
"""
Updates movie progress based on number of downloaded pieces
"""
p_downloaded = self.torrent_handler.status().pieces
movie = self.get_downloading_movie()
first_piece, last_piece = movie.first_piece, movie.last_piece
# logger.debug("first_piece: {}".format(first_piece))
# logger.debug("last_piece: {}".format(last_piece ))
counter = 0
for item in p_downloaded[first_piece:last_piece]:
            if item:
counter += 1
else:
break
# logger.debug("download_pieces inside thread is: {}".format(counter))
movie.downloaded_pieces = counter
def _manage_pieces_priority(self):
"""
        Sets priority blocks. The first pieces should be downloaded first, so
        they get the highest priority.
"""
p_downloaded = self.torrent_handler.status().pieces
movie = self.get_downloading_movie()
if not movie:
return
first_piece, last_piece = movie.cur_first_piece, movie.cur_last_piece
if not False in p_downloaded[first_piece:first_piece + self._jump + 1]:
# all block downloaded
first_piece += self._jump
movie.cur_first_piece = first_piece
        # prioritizing
        # [7, 7, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...]
if first_piece + self._jump + self._jump <= last_piece:
for piece in range(first_piece + 4 * self._jump,
last_piece + 1):
# logger.debug("the lowest priority for: {}".format(piece))
self.torrent_handler.piece_priority(piece, 0)
if first_piece + self._jump <= last_piece:
for piece in range(first_piece + 2 * self._jump,
min(last_piece + 1, first_piece + 4 * self._jump)):
# logger.debug("low priority for: {}".format(piece))
self.torrent_handler.piece_priority(piece, 2)
if first_piece <= last_piece:
for piece in range(first_piece,
min(last_piece + 1, first_piece + 2 * self._jump)):
# logger.debug("the highest priority for: {}".format(piece))
self.torrent_handler.piece_priority(piece, 7)
# for mp4 get 512KB end of file
# TODO: bug below
# for piece in range(
# last_piece - int(self.piece_length / 512 * 1024) + 1,
# last_piece):
# logger.debug("the highest priority for (512KB end of file): {}".format(piece))
# self.torrent_handler.piece_priority(piece, 7)
self._update_movies_progress()
if not self._priority_thread_stop.is_set():
if self._priority_timer:
self._priority_timer.cancel()
self._priority_timer = None
self._run_torrent_threads()
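    # Worked sketch of the priority windows (illustrative numbers): with
    # _jump == 2, cur_first_piece == 0 and cur_last_piece == 20 a single pass
    # assigns
    #     pieces 0-3  -> priority 7 (needed next, fetch first)
    #     pieces 4-7  -> priority 2 (fetch soon)
    #     pieces 8-20 -> priority 0 (ignored until the window advances)
    # Once pieces 0-2 are all downloaded the window slides forward by _jump
    # and the same assignment is recomputed from the new first piece.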
def _run_torrent_threads(self):
# logger.debug("run threads for {}".format(self.priority_interval))
if not self._priority_thread_stop.is_set():
if not self._priority_timer:
self._priority_timer = Timer(self.priority_interval,
self._manage_pieces_priority)
self._priority_timer.start()
def _stop_torrent_threads(self):
self._priority_thread_stop.set()
if self._priority_timer:
self._priority_timer.cancel()
def _prioritize_to_none(self):
if not self._prioritized and self.has_torrent_info():
self._prioritized = True
info = self.get_torrent_info()
for piece in range(0, info.num_pieces()):
self.torrent_handler.piece_priority(piece, 0)
def _get_download_dir(self):
path = os.path.join(settings.DOWNLOAD_DIR,
hashlib.md5(self.name.encode()).hexdigest())
try:
os.makedirs(path)
except OSError:
pass
return path
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
import os
import copy
from itertools import chain
import numpy as np
from ....extern.six.moves import cStringIO as StringIO
from ... import ascii
from .... import table
from ....table.table_helpers import simple_table
from ....tests.helper import pytest, catch_warnings
from ....utils.exceptions import AstropyWarning, AstropyDeprecationWarning
from .... import units
from .common import setup_function, teardown_function
# Check to see if the BeautifulSoup dependency is present.
try:
from bs4 import BeautifulSoup, FeatureNotFound
HAS_BEAUTIFUL_SOUP = True
except ImportError:
HAS_BEAUTIFUL_SOUP = False
test_defs = [
dict(kwargs=dict(),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(delimiter=None),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(formats={'XCENTER': '%12.1f',
'YCENTER': '{0:.1f}'},
include_names=['XCENTER', 'YCENTER'],
strip_whitespace=False),
out="""\
XCENTER YCENTER
" 138.5" 256.4
" 18.1" 280.2
"""
),
dict(kwargs=dict(Writer=ascii.Rdb, exclude_names=['CHI']),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR
N\tN\tN\tN\tN\tN\tN\tN\tN\tS
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Tab),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR
14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error
18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error
"""
),
dict(kwargs=dict(Writer=ascii.NoHeader),
out="""\
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader),
out="""\
# ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader, comment='&'),
out="""\
&ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.Latex),
out="""\
\\begin{table}
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
\\end{table}
"""
),
dict(kwargs=dict(Writer=ascii.AASTex),
out="""\
\\begin{deluxetable}{ccccccccccc}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable}
"""
),
dict(
kwargs=dict(Writer=ascii.AASTex, caption='Mag values \\label{tab1}', latexdict={
'units': {'MAG': '[mag]', 'XCENTER': '[pixel]'}, 'tabletype': 'deluxetable*',
'tablealign': 'htpb'}),
out="""\
\\begin{deluxetable*}{ccccccccccc}[htpb]
\\tablecaption{Mag values \\label{tab1}}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable*}
"""
),
dict(
kwargs=dict(Writer=ascii.Latex, caption='Mag values \\label{tab1}',
latexdict={'preamble': '\\begin{center}', 'tablefoot': '\\end{center}',
'data_end': ['\\hline', '\\hline'],
'units':{'MAG': '[mag]', 'XCENTER': '[pixel]'},
'tabletype': 'table*',
'tablealign': 'h'},
col_align='|lcccccccccc|'),
out="""\
\\begin{table*}[h]
\\begin{center}
\\caption{Mag values \\label{tab1}}
\\begin{tabular}{|lcccccccccc|}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& [pixel] & pixels & [mag] & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\hline
\\hline
\\end{tabular}
\\end{center}
\\end{table*}
"""
),
dict(kwargs=dict(Writer=ascii.Latex, latexdict=ascii.latexdicts['template']),
out="""\
\\begin{tabletype}[tablealign]
preamble
\\caption{caption}
\\begin{tabular}{col_align}
header_start
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
header_end
data_start
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
data_end
\\end{tabular}
tablefoot
\\end{tabletype}
"""
),
dict(kwargs=dict(Writer=ascii.HTML, htmldict={'css': 'table,th,td{border:1px solid black;'}),
out="""\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
table,th,td{border:1px solid black; </style>
</head>
<body>
<table>
<thead>
<tr>
<th>ID</th>
<th>XCENTER</th>
<th>YCENTER</th>
<th>MAG</th>
<th>MERR</th>
<th>MSKY</th>
<th>NITER</th>
<th>SHARPNESS</th>
<th>CHI</th>
<th>PIER</th>
<th>PERROR</th>
</tr>
</thead>
<tr>
<td>14</td>
<td>138.538</td>
<td>256.405</td>
<td>15.461</td>
<td>0.003</td>
<td>34.85955</td>
<td>4</td>
<td>-0.032</td>
<td>0.802</td>
<td>0</td>
<td>No_error</td>
</tr>
<tr>
<td>18</td>
<td>18.114</td>
<td>280.170</td>
<td>22.329</td>
<td>0.206</td>
<td>30.12784</td>
<td>4</td>
<td>-2.544</td>
<td>1.104</td>
<td>0</td>
<td>No_error</td>
</tr>
</table>
</body>
</html>
"""
),
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\MERGERAD='INDEF'
\\IRAF='NOAO/IRAFV2.10EXPORT'
\\USER=''
\\HOST='tucana'
\\DATE='05-28-93'
\\TIME='14:46:13'
\\PACKAGE='daophot'
\\TASK='nstar'
\\IMAGE='test'
\\GRPFILE='test.psg.1'
\\PSFIMAGE='test.psf.1'
\\NSTARFILE='test.nst.1'
\\REJFILE='"hello world"'
\\SCALE='1.'
\\DATAMIN='50.'
\\DATAMAX='24500.'
\\GAIN='1.'
\\READNOISE='0.'
\\OTIME='00:07:59.0'
\\XAIRMASS='1.238106'
\\IFILTER='V'
\\RECENTER='yes'
\\FITSKY='no'
\\PSFMAG='16.594'
\\PSFRAD='5.'
\\FITRAD='3.'
\\MAXITER='50'
\\MAXGROUP='60'
\\FLATERROR='0.75'
\\PROFERROR='5.'
\\CLIPEXP='6'
\\CLIPRANGE='2.5'
| ID| XCENTER| YCENTER| MAG| MERR| MSKY| NITER| SHARPNESS| CHI| PIER| PERROR|
| long| double| double| double| double| double| long| double| double| long| char|
| | pixels| pixels| magnitudes| magnitudes| counts| | | | | perrors|
| null| null| null| null| null| null| null| null| null| null| null|
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
]
test_defs_no_data = [
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\ This is an example of a valid comment.
\\ The 2nd data line is used to verify the exact column parsing
\\ (unclear if this is a valid for the IPAC format)
\\catalog='sao'
\\date='Wed Sp 20 09:48:36 1995'
\\mykeyword='Another way for defining keyvalue string'
| ra| dec| sai| v2|sptype|
|double|double|long|double| char|
| unit| unit|unit| unit| ergs|
| null| null|null| null| null|
"""
),
]
tab_to_fill = ['a b c', '1 2 3', '1 1 3']
test_defs_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w')),
out="""\
a b c
w 2 3
w w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w', 'b')),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values = ('1', 'w'),
fill_include_names = ['b']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values = ('1', 'w'),
fill_exclude_names = ['a']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values = ('1', 'w'),
fill_include_names = ['a'],
fill_exclude_names = ['a', 'b']),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values = [('1', 'w')],
formats={'a': '%4.2f'}),
out="""\
a b c
1.00 2 3
1.00 w 3
"""
),
]
test_def_masked_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
"" 2 3
1 1 ""
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'X')]),
out="""\
a b c
X 2 3
w w X
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'XXX')],
formats={'a': '%4.1f'}),
out="""\
a b c
XXX 2 3
1.0 w XXX
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
a,b,c
,2,3
1,1,
"""
),
]
def check_write_table(test_def, table, fast_writer):
out = StringIO()
try:
ascii.write(table, out, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
if 'not in the list of formats with fast writers' not in str(e):
raise e
return
print('Expected:\n{}'.format(test_def['out']))
print('Actual:\n{}'.format(out.getvalue()))
assert [x.strip() for x in out.getvalue().strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
def check_write_table_via_table(test_def, table, fast_writer):
out = StringIO()
test_def = copy.deepcopy(test_def)
if 'Writer' in test_def['kwargs']:
format = 'ascii.{0}'.format(test_def['kwargs']['Writer']._format_name)
del test_def['kwargs']['Writer']
else:
format = 'ascii'
try:
table.write(out, format=format, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
if 'not in the list of formats with fast writers' not in str(e):
raise e
return
print('Expected:\n{}'.format(test_def['out']))
print('Actual:\n{}'.format(out.getvalue()))
assert [x.strip() for x in out.getvalue().strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_table(fast_writer):
table = ascii.get_reader(Reader=ascii.Daophot)
data = table.read('t/daophot.dat')
for test_def in test_defs:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_values(fast_writer):
data = ascii.read(tab_to_fill)
for test_def in test_defs_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_masked_different(fast_writer):
'''see discussion in #2255'''
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data['a'].mask = [True, False]
data['c'].mask = [False, True]
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_no_data_ipac(fast_writer):
"""Write an IPAC table that contains no data."""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('t/no_data_ipac.dat')
for test_def in test_defs_no_data:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
def test_write_invalid_toplevel_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('t/no_data_ipac.dat')
data.meta['blah'] = 'extra'
with catch_warnings(AstropyWarning) as ASwarn:
out = StringIO()
data.write(out, format='ascii.ipac')
assert len(ASwarn) == 1
assert "were not written" in str(ASwarn[0].message)
def test_write_invalid_keyword_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('t/no_data_ipac.dat')
data.meta['keywords']['blah'] = 'invalid'
with catch_warnings(AstropyWarning) as ASwarn:
out = StringIO()
data.write(out, format='ascii.ipac')
assert len(ASwarn) == 1
assert "has been skipped" in str(ASwarn[0].message)
def test_write_valid_meta_ipac():
"""Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('t/no_data_ipac.dat')
data.meta['keywords']['blah'] = {'value': 'invalid'}
with catch_warnings(AstropyWarning) as ASwarn:
out = StringIO()
data.write(out, format='ascii.ipac')
assert len(ASwarn) == 0
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_comments(fast_writer):
"""Write comments in output originally read by io.ascii."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1,2,3')
out = StringIO()
ascii.write(data, out, format='basic', fast_writer=fast_writer)
expected = ['# c1', '# c2', '# c3', 'a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
# header comes before comments for commented-header
out = StringIO()
ascii.write(data, out, format='commented_header', fast_writer=fast_writer)
expected = ['# a b c', '# c1', '# c2', '# c3', '1 2 3']
assert out.getvalue().splitlines() == expected
# setting comment=False should disable comment writing
out = StringIO()
ascii.write(data, out, format='basic', comment=False, fast_writer=fast_writer)
expected = ['a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("fmt", ['%0.1f', '.1f', '0.1f', '{0:0.1f}'])
def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33')
out = StringIO()
expected = ['# c1', '# c2', '# c3', 'a b c', '1.1 2.22 3.33']
data['a'].format = fmt
ascii.write(data, out, format='basic', fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
data = table.Table([[1], [2], [3]], names=(' A', 'B ', ' C '))
out = StringIO()
ascii.write(data, out, format='csv', fast_writer=fast_writer)
assert out.getvalue().splitlines()[0] == 'A,B,C'
def test_latex_units():
"""
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units.
"""
t = table.Table([table.Column(name='date', data=['a','b']),
table.Column(name='NUV exp.time', data=[1,2])])
latexdict = copy.deepcopy(ascii.latexdicts['AA'])
latexdict['units'] = {'NUV exp.time':'s'}
out = StringIO()
expected = '''\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
'''.replace('\n', os.linesep)
ascii.write(t, out, format='aastex', latexdict=latexdict)
assert out.getvalue() == expected
# use unit attribute instead
t['NUV exp.time'].unit = units.s
t['date'].unit = units.yr
out = StringIO()
ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA'])
assert out.getvalue() == expected.replace(
'colhead{s}', r'colhead{$\mathrm{s}$}').replace(
'colhead{ }', r'colhead{$\mathrm{yr}$}')
@pytest.mark.parametrize("fast_writer", [True, False])
def test_commented_header_comments(fast_writer):
"""
Test the fix for #3562 with confusing exception using comment=False
for the commented_header writer.
"""
t = table.Table([[1, 2]])
with pytest.raises(ValueError) as err:
out = StringIO()
ascii.write(t, out, format='commented_header', comment=False,
fast_writer=fast_writer)
assert "for the commented_header writer you must supply a string" in str(err.value)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_byte_string_output(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([['Hello', 'World']], dtype=['S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0', 'Hello', 'World']
@pytest.mark.parametrize('names, include_names, exclude_names, formats, issues_warning', [
(['x', 'y'], ['x', 'y'], ['x'], {'x':'%d', 'y':'%f'}, True),
(['x', 'y'], ['x', 'y'], ['y'], {'x':'%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p':'%d', 'q':'%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'z':'%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'x':'%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p':'%d', 'y':'%f'}, True),
(['x', 'y'], ['x', 'y'], [], {}, False)
])
def test_names_with_formats(names, include_names, exclude_names, formats, issues_warning):
"""Test for #4508."""
t = table.Table([[1,2,3],[4.1,5.2,6.3]])
with catch_warnings(AstropyWarning) as ASwarn:
out = StringIO()
ascii.write(t, out, names=names, include_names=include_names,
exclude_names=exclude_names, formats=formats)
assert (issues_warning == (len(ASwarn) == 1))
@pytest.mark.parametrize('formats, issues_warning', [
({'p':'%d', 'y':'%f'}, True),
({'x':'%d', 'y':'%f'}, True),
({'z':'%f'}, True),
({}, False)
])
def test_columns_names_with_formats(formats, issues_warning):
"""Test the fix for #4508."""
t = table.Table([[1,2,3],[4.1,5.2,6.3]])
with catch_warnings(AstropyWarning) as ASwarn:
out = StringIO()
ascii.write(t, out,formats=formats)
assert (issues_warning == (len(ASwarn) == 1))
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_quoted_empty_field(fast_writer):
"""
    Test that empty string fields are written as quoted empty fields
    (``""``) in space-delimited output and as empty fields in CSV output.
"""
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0 col1', 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=',')
assert out.getvalue().splitlines() == ['col0,col1', 'Hello,', ',']
@pytest.mark.parametrize("format", ['ascii', 'csv', 'html', 'latex',
'ascii.fixed_width', 'html'])
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_overwrite_ascii(format, fast_writer, tmpdir):
"""Test overwrite argument for various ASCII writers"""
filename = tmpdir.join("table-tmp.dat").strpath
open(filename, 'w').close()
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
with pytest.raises(IOError) as err:
t.write(filename, overwrite=False, format=format,
fast_writer=fast_writer)
assert str(err.value).endswith('already exists')
with catch_warnings(AstropyDeprecationWarning) as warning:
t.write(filename, format=format, fast_writer=fast_writer)
assert len(warning) == 1
assert str(warning[0].message).endswith(
"Automatically overwriting ASCII files is deprecated. "
"Use the argument 'overwrite=True' in the future.")
t.write(filename, overwrite=True, format=format,
fast_writer=fast_writer)
# If the output is a file object, overwrite is ignored
with open(filename, 'w') as fp:
t.write(fp, format=format,
fast_writer=fast_writer)
t.write(fp, overwrite=False, format=format,
fast_writer=fast_writer)
t.write(fp, overwrite=True, format=format,
fast_writer=fast_writer)
fmt_name_classes = list(chain(ascii.core.FAST_CLASSES.items(),
ascii.core.FORMAT_CLASSES.items()))
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_roundtrip_masked(fmt_name_class):
"""
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, '_io_registry_can_write', True):
return
# Skip tests for fixed_width or HTML without bs4
if ((fmt_name == 'html' and not HAS_BEAUTIFUL_SOUP)
or fmt_name == 'fixed_width'):
return
t = simple_table(masked=True)
out = StringIO()
fast = fmt_name in ascii.core.FAST_CLASSES
try:
ascii.write(t, out, format=fmt_name, fast_writer=fast)
except ImportError: # Some failed dependency, e.g. PyYAML, skip test
return
# No-header formats need to be told the column names
kwargs = {'names': t.colnames} if 'no_header' in fmt_name else {}
t2 = ascii.read(out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs)
assert t.colnames == t2.colnames
for col, col2 in zip(t.itercols(), t2.itercols()):
assert col.dtype.kind == col2.dtype.kind
assert np.all(col == col2)
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
import uuid
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VMUtilsV2(vmutils.VMUtils):
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Physical Disk Drive'
_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic Disk Drive'
_DVD_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic DVD Drive'
_SCSI_RES_SUBTYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'
_IDE_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual Hard Disk'
_IDE_DVD_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'
_SERIAL_PORT_RES_SUB_TYPE = 'Microsoft:Hyper-V:Serial Port'
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
_SNAPSHOT_FULL = 2
_METRIC_AGGR_CPU_AVG = 'Aggregated Average CPU Utilization'
_METRIC_ENABLED = 2
_STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData'
_ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS = \
'Msvm_EthernetPortAllocationSettingData'
_AUTOMATIC_STARTUP_ACTION_NONE = 2
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 11,
constants.HYPERV_VM_STATE_PAUSED: 9,
constants.HYPERV_VM_STATE_SUSPENDED: 6}
def __init__(self, host='.'):
super(VMUtilsV2, self).__init__(host)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED):
instance_notes.append((vs.ElementName, [v for v in vs.Notes if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
return [v.ElementName for v in
self._conn.Msvm_VirtualSystemSettingData(
['ElementName'],
VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED)]
def _create_vm_obj(self, vs_man_svc, vm_name, notes, dynamic_memory_ratio):
vs_data = self._conn.Msvm_VirtualSystemSettingData.new()
vs_data.ElementName = vm_name
vs_data.Notes = notes
# Don't start automatically on host boot
vs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE
# vNUMA and dynamic memory are mutually exclusive
if dynamic_memory_ratio > 1:
vs_data.VirtualNumaEnabled = False
(job_path,
vm_path,
ret_val) = vs_man_svc.DefineSystem(ResourceSettings=[],
ReferenceConfiguration=None,
SystemSettings=vs_data.GetText_(1))
job = self.check_ret_val(ret_val, job_path)
if not vm_path and job:
vm_path = job.associators(self._AFFECTED_JOB_ELEMENT_CLASS)[0]
return self._get_wmi_obj(vm_path)
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
"""Create an IDE drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
res_sub_type = self._DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._DVD_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
# Set the IDE ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
drive.AddressOnParent = drive_addr
# Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.IDE_DISK:
res_sub_type = self._IDE_DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(
res_sub_type, self._STORAGE_ALLOC_SETTING_DATA_CLASS)
res.Parent = drive_path
res.HostResource = [path]
self._add_virt_resource(res, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.AddressOnParent = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def _get_disk_resource_address(self, disk_resource):
return disk_resource.AddressOnParent
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_RES_SUBTYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(scsicontrl, vm.path_())
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.HostResource
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
# Remove the VM. It does not destroy any associated virtual disk.
(job_path, ret_val) = vs_man_svc.DestroySystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddResourceSettings(vm_path, res_xml)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path,
out_res_setting_data,
ret_val) = vs_man_svc.ModifyResourceSettings(
ResourceSettings=[res_setting_data.GetText_(1)])
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveResourceSettings(res_path)
self.check_ret_val(ret_val, job_path)
def get_vm_state(self, vm_name):
settings = self.get_vm_summary_info(vm_name)
return settings['EnabledState']
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]
(job_path, snp_setting_data, ret_val) = vs_snap_svc.CreateSnapshot(
AffectedSystem=vm.path_(),
SnapshotType=self._SNAPSHOT_FULL)
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]
(job_path, ret_val) = vs_snap_svc.DestroySnapshot(snapshot_path)
self.check_ret_val(ret_val, job_path)
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
eth_port_data = self._get_new_setting_data(
self._ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS)
eth_port_data.HostResource = [vswitch_conn_data]
eth_port_data.Parent = nic_data.path_()
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(eth_port_data, vm.path_())
def enable_vm_metrics_collection(self, vm_name):
metric_names = [self._METRIC_AGGR_CPU_AVG]
vm = self._lookup_vm_check(vm_name)
metric_svc = self._conn.Msvm_MetricService()[0]
(disks, volumes) = self._get_vm_disks(vm)
        filtered_disks = [d for d in disks if
                          d.ResourceSubType != self._IDE_DVD_RES_SUB_TYPE]
        # Enable metrics collection for each disk drive.
for disk in filtered_disks:
self._enable_metrics(metric_svc, disk)
for metric_name in metric_names:
metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
if not metric_def:
LOG.debug("Metric not found: %s", metric_name)
else:
self._enable_metrics(metric_svc, vm, metric_def[0].path_())
def _enable_metrics(self, metric_svc, element, definition_path=None):
metric_svc.ControlMetrics(
Subject=element.path_(),
Definition=definition_path,
MetricCollectionEnabled=self._METRIC_ENABLED)
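# Hedged usage sketch (illustrative only, not part of the original module):
# on a Windows host exposing the Hyper-V "root/virtualization/v2" WMI
# namespace, the class above could be exercised roughly as follows. The
# helper is never called at import time; VM names are whatever Hyper-V
# reports.
def _example_list_hyperv_instances():
    utils = VMUtilsV2()
    names = utils.list_instances()
    for name in names:
        LOG.debug("Found Hyper-V instance: %s", name)
    return names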
import calendar
import datetime as dt
import inspect
import re
import time
from collections import defaultdict
from contextlib import contextmanager
from types import FunctionType
import param
import bokeh
import numpy as np
from bokeh.core.json_encoder import serialize_json # noqa (API import)
from bokeh.core.validation import silence
from bokeh.layouts import WidgetBox, Row, Column
from bokeh.models import tools
from bokeh.models import (
Model, ToolbarBox, FactorRange, Range1d, Plot, Spacer, CustomJS,
GridBox, DatetimeAxis, CategoricalAxis
)
from bokeh.models.formatters import (
FuncTickFormatter, TickFormatter, PrintfTickFormatter
)
from bokeh.models.widgets import DataTable, Tabs, Div
from bokeh.plotting import Figure
from bokeh.themes.theme import Theme
try:
from bokeh.themes import built_in_themes
except ImportError:
built_in_themes = {}
from ...core.ndmapping import NdMapping
from ...core.overlay import Overlay
from ...core.util import (
LooseVersion, arraylike_types, callable_name, cftime_types,
cftime_to_timestamp, isnumeric, pd, unique_array
)
from ...core.spaces import get_nested_dmaps, DynamicMap
from ..util import dim_axis_label
bokeh_version = LooseVersion(bokeh.__version__) # noqa
TOOL_TYPES = {
'pan': tools.PanTool,
'xpan': tools.PanTool,
'ypan': tools.PanTool,
'xwheel_pan': tools.WheelPanTool,
'ywheel_pan': tools.WheelPanTool,
'wheel_zoom': tools.WheelZoomTool,
'xwheel_zoom': tools.WheelZoomTool,
'ywheel_zoom': tools.WheelZoomTool,
'zoom_in': tools.ZoomInTool,
'xzoom_in': tools.ZoomInTool,
'yzoom_in': tools.ZoomInTool,
'zoom_out': tools.ZoomOutTool,
'xzoom_out': tools.ZoomOutTool,
'yzoom_out': tools.ZoomOutTool,
'click': tools.TapTool,
'tap': tools.TapTool,
'crosshair': tools.CrosshairTool,
'box_select': tools.BoxSelectTool,
'xbox_select': tools.BoxSelectTool,
'ybox_select': tools.BoxSelectTool,
'poly_select': tools.PolySelectTool,
'lasso_select': tools.LassoSelectTool,
'box_zoom': tools.BoxZoomTool,
'xbox_zoom': tools.BoxZoomTool,
'ybox_zoom': tools.BoxZoomTool,
'hover': tools.HoverTool,
'save': tools.SaveTool,
'undo': tools.UndoTool,
'redo': tools.RedoTool,
'reset': tools.ResetTool,
'help': tools.HelpTool,
'box_edit': tools.BoxEditTool,
'point_draw': tools.PointDrawTool,
'poly_draw': tools.PolyDrawTool,
'poly_edit': tools.PolyEditTool,
'freehand_draw': tools.FreehandDrawTool
}
def convert_timestamp(timestamp):
"""
Converts bokehJS timestamp to datetime64.
"""
datetime = dt.datetime.utcfromtimestamp(timestamp/1000.)
return np.datetime64(datetime.replace(tzinfo=None))
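# Hedged example (illustrative, not part of the original module): bokehJS
# reports datetimes in milliseconds since the epoch, so 1.6e12 ms maps to
# 2020-09-13T12:26:40 UTC.
def _example_convert_timestamp():
    return convert_timestamp(1600000000000.0)  # np.datetime64('2020-09-13T12:26:40')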
def prop_is_none(value):
"""
Checks if property value is None.
"""
return (value is None or
(isinstance(value, dict) and 'value' in value
and value['value'] is None))
def decode_bytes(array):
"""
Decodes an array, list or tuple of bytestrings to avoid python 3
bokeh serialization errors
"""
if (not len(array) or (isinstance(array, arraylike_types) and array.dtype.kind != 'O')):
return array
decoded = [v.decode('utf-8') if isinstance(v, bytes) else v for v in array]
if isinstance(array, np.ndarray):
return np.asarray(decoded)
elif isinstance(array, tuple):
return tuple(decoded)
return decoded
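# Hedged example: byte strings in a plain list are decoded to str so bokeh
# can serialize them; non-bytes entries pass through untouched.
def _example_decode_bytes():
    return decode_bytes([b'a', b'b', 'c'])  # -> ['a', 'b', 'c']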
def layout_padding(plots, renderer):
"""
Pads Nones in a list of lists of plots with empty plots.
"""
widths, heights = defaultdict(int), defaultdict(int)
for r, row in enumerate(plots):
for c, p in enumerate(row):
if p is not None:
width, height = renderer.get_size(p)
widths[c] = max(widths[c], width)
heights[r] = max(heights[r], height)
expanded_plots = []
for r, row in enumerate(plots):
expanded_plots.append([])
for c, p in enumerate(row):
if p is None:
p = empty_plot(widths[c], heights[r])
elif hasattr(p, 'plot_width') and p.plot_width == 0 and p.plot_height == 0:
p.plot_width = widths[c]
p.plot_height = heights[r]
expanded_plots[r].append(p)
return expanded_plots
def compute_plot_size(plot):
"""
Computes the size of bokeh models that make up a layout such as
figures, rows, columns, widgetboxes and Plot.
"""
if isinstance(plot, GridBox):
ndmapping = NdMapping({(x, y): fig for fig, y, x in plot.children}, kdims=['x', 'y'])
cols = ndmapping.groupby('x')
rows = ndmapping.groupby('y')
width = sum([max([compute_plot_size(f)[0] for f in col]) for col in cols])
height = sum([max([compute_plot_size(f)[1] for f in row]) for row in rows])
return width, height
elif isinstance(plot, (Div, ToolbarBox)):
# Cannot compute size for Div or ToolbarBox
return 0, 0
elif isinstance(plot, (Row, Column, WidgetBox, Tabs)):
if not plot.children: return 0, 0
if isinstance(plot, Row) or (isinstance(plot, ToolbarBox) and plot.toolbar_location not in ['right', 'left']):
w_agg, h_agg = (np.sum, np.max)
elif isinstance(plot, Tabs):
w_agg, h_agg = (np.max, np.max)
else:
w_agg, h_agg = (np.max, np.sum)
widths, heights = zip(*[compute_plot_size(child) for child in plot.children])
return w_agg(widths), h_agg(heights)
elif isinstance(plot, Figure):
if plot.plot_width:
width = plot.plot_width
else:
width = plot.frame_width + plot.min_border_right + plot.min_border_left
if plot.plot_height:
height = plot.plot_height
else:
height = plot.frame_height + plot.min_border_bottom + plot.min_border_top
return width, height
elif isinstance(plot, (Plot, DataTable, Spacer)):
return plot.width, plot.height
else:
return 0, 0
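# Hedged example: fixed-size models such as a Spacer simply report their
# explicit width and height.
def _example_compute_plot_size():
    return compute_plot_size(Spacer(width=200, height=100))  # -> (200, 100)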
def compute_layout_properties(
width, height, frame_width, frame_height, explicit_width,
explicit_height, aspect, data_aspect, responsive, size_multiplier,
logger=None):
"""
Utility to compute the aspect, plot width/height and sizing_mode
behavior.
Args:
width (int): Plot width
height (int): Plot height
frame_width (int): Plot frame width
frame_height (int): Plot frame height
explicit_width (list): List of user supplied widths
explicit_height (list): List of user supplied heights
aspect (float): Plot aspect
data_aspect (float): Scaling between x-axis and y-axis ranges
responsive (boolean): Whether the plot should resize responsively
size_multiplier (float): Multiplier for supplied plot dimensions
logger (param.Parameters): Parameters object to issue warnings on
Returns:
Returns two dictionaries one for the aspect and sizing modes,
and another for the plot dimensions.
"""
fixed_width = (explicit_width or frame_width)
fixed_height = (explicit_height or frame_height)
fixed_aspect = aspect or data_aspect
if aspect == 'square':
aspect = 1
elif aspect == 'equal':
data_aspect = 1
# Plot dimensions
height = None if height is None else int(height*size_multiplier)
width = None if width is None else int(width*size_multiplier)
frame_height = None if frame_height is None else int(frame_height*size_multiplier)
frame_width = None if frame_width is None else int(frame_width*size_multiplier)
actual_width = frame_width or width
actual_height = frame_height or height
if frame_width is not None:
width = None
if frame_height is not None:
height = None
sizing_mode = 'fixed'
if responsive:
if fixed_height and fixed_width:
responsive = False
if logger:
logger.warning("responsive mode could not be enabled "
"because fixed width and height were "
"specified.")
elif fixed_width:
height = None
sizing_mode = 'fixed' if fixed_aspect else 'stretch_height'
elif fixed_height:
width = None
sizing_mode = 'fixed' if fixed_aspect else 'stretch_width'
else:
width, height = None, None
if fixed_aspect:
if responsive == 'width':
sizing_mode = 'scale_width'
elif responsive == 'height':
sizing_mode = 'scale_height'
else:
sizing_mode = 'scale_both'
else:
if responsive == 'width':
sizing_mode = 'stretch_both'
elif responsive == 'height':
sizing_mode = 'stretch_height'
else:
sizing_mode = 'stretch_both'
if fixed_aspect:
if ((explicit_width and not frame_width) != (explicit_height and not frame_height)) and logger:
logger.warning('Due to internal constraints, when aspect and '
'width/height is set, the bokeh backend uses '
'those values as frame_width/frame_height instead. '
'This ensures the aspect is respected, but means '
'that the plot might be slightly larger than '
'anticipated. Set the frame_width/frame_height '
'explicitly to suppress this warning.')
aspect_type = 'data_aspect' if data_aspect else 'aspect'
if fixed_width and fixed_height and aspect:
if aspect == 'equal':
data_aspect = 1
elif not data_aspect:
aspect = None
if logger:
logger.warning(
"%s value was ignored because absolute width and "
"height values were provided. Either supply "
"explicit frame_width and frame_height to achieve "
"desired aspect OR supply a combination of width "
"or height and an aspect value." % aspect_type)
elif fixed_width and responsive:
height = None
responsive = False
if logger:
logger.warning("responsive mode could not be enabled "
"because fixed width and aspect were "
"specified.")
elif fixed_height and responsive:
width = None
responsive = False
if logger:
logger.warning("responsive mode could not be enabled "
"because fixed height and aspect were "
"specified.")
elif responsive == 'width':
sizing_mode = 'scale_width'
elif responsive == 'height':
sizing_mode = 'scale_height'
if responsive == 'width' and fixed_width:
responsive = False
if logger:
logger.warning("responsive width mode could not be enabled "
"because a fixed width was defined.")
if responsive == 'height' and fixed_height:
responsive = False
if logger:
logger.warning("responsive height mode could not be enabled "
"because a fixed height was defined.")
match_aspect = False
aspect_scale = 1
aspect_ratio = None
if data_aspect:
match_aspect = True
if (fixed_width and fixed_height):
frame_width, frame_height = frame_width or width, frame_height or height
elif fixed_width or not fixed_height:
height = None
elif fixed_height or not fixed_width:
width = None
aspect_scale = data_aspect
if aspect == 'equal':
aspect_scale = 1
elif responsive:
aspect_ratio = aspect
elif (fixed_width and fixed_height):
pass
elif isnumeric(aspect):
if responsive:
aspect_ratio = aspect
elif fixed_width:
frame_width = actual_width
frame_height = int(actual_width/aspect)
width, height = None, None
else:
frame_width = int(actual_height*aspect)
frame_height = actual_height
width, height = None, None
elif aspect is not None and logger:
        logger.warning('aspect value of type %s not recognized, '
                       'provide a numeric value, \'equal\' or '
                       '\'square\'.' % type(aspect).__name__)
return ({'aspect_ratio': aspect_ratio,
'aspect_scale': aspect_scale,
'match_aspect': match_aspect,
'sizing_mode' : sizing_mode},
{'frame_width' : frame_width,
'frame_height': frame_height,
'plot_height' : height,
'plot_width' : width})
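# Hedged example (illustrative call only): a plain fixed-size plot with no
# aspect, data_aspect or responsive settings keeps 'fixed' sizing and passes
# the dimensions through as plot_width/plot_height.
def _example_compute_layout_properties():
    aspect_opts, dim_opts = compute_layout_properties(
        width=300, height=300, frame_width=None, frame_height=None,
        explicit_width=[300], explicit_height=[300], aspect=None,
        data_aspect=None, responsive=False, size_multiplier=1, logger=None)
    return aspect_opts['sizing_mode'], dim_opts  # -> ('fixed', {...})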
@contextmanager
def silence_warnings(*warnings):
"""
Context manager for silencing bokeh validation warnings.
"""
for warning in warnings:
silence(warning)
try:
yield
finally:
for warning in warnings:
silence(warning, False)
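# Hedged example: silencing bokeh's MISSING_RENDERERS validation warning
# (a standard warning in bokeh.core.validation.warnings) while building an
# intentionally empty figure.
def _example_silence_warnings():
    from bokeh.core.validation.warnings import MISSING_RENDERERS
    with silence_warnings(MISSING_RENDERERS):
        return Figure(toolbar_location=None, tools=[])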
def empty_plot(width, height):
"""
Creates an empty and invisible plot of the specified size.
"""
return Spacer(width=width, height=height)
def remove_legend(plot, legend):
"""
Removes a legend from a bokeh plot.
"""
valid_places = ['left', 'right', 'above', 'below', 'center']
plot.legend[:] = [l for l in plot.legend if l is not legend]
for place in valid_places:
place = getattr(plot, place)
if legend in place:
place.remove(legend)
def font_size_to_pixels(size):
"""
Convert a fontsize to a pixel value
"""
if size is None or not isinstance(size, str):
return
conversions = {'em': 16, 'pt': 16/12.}
    val = re.findall(r'\d+', size)
unit = re.findall('[a-z]+', size)
if (val and not unit) or (val and unit[0] == 'px'):
return int(val[0])
elif val and unit[0] in conversions:
return (int(int(val[0]) * conversions[unit[0]]))
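# Hedged examples: pixel sizes pass straight through while 'em' and 'pt'
# sizes are converted with the rough factors defined above.
def _example_font_size_to_pixels():
    # '12px' -> 12, '1em' -> 16, '9pt' -> 12
    return (font_size_to_pixels('12px'),
            font_size_to_pixels('1em'),
            font_size_to_pixels('9pt'))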
def make_axis(axis, size, factors, dim, flip=False, rotation=0,
label_size=None, tick_size=None, axis_height=35):
factors = list(map(dim.pprint_value, factors))
nchars = np.max([len(f) for f in factors])
ranges = FactorRange(factors=factors)
ranges2 = Range1d(start=0, end=1)
axis_label = dim_axis_label(dim)
reset = "range.setv({start: 0, end: range.factors.length})"
customjs = CustomJS(args=dict(range=ranges), code=reset)
ranges.js_on_change('start', customjs)
axis_props = {}
if label_size:
axis_props['axis_label_text_font_size'] = label_size
if tick_size:
axis_props['major_label_text_font_size'] = tick_size
tick_px = font_size_to_pixels(tick_size)
if tick_px is None:
tick_px = 8
label_px = font_size_to_pixels(label_size)
if label_px is None:
label_px = 10
rotation = np.radians(rotation)
if axis == 'x':
align = 'center'
# Adjust height to compensate for label rotation
height = int(axis_height + np.abs(np.sin(rotation)) *
((nchars*tick_px)*0.82)) + tick_px + label_px
opts = dict(x_axis_type='auto', x_axis_label=axis_label,
x_range=ranges, y_range=ranges2, plot_height=height,
plot_width=size)
else:
# Adjust width to compensate for label rotation
align = 'left' if flip else 'right'
width = int(axis_height + np.abs(np.cos(rotation)) *
((nchars*tick_px)*0.82)) + tick_px + label_px
opts = dict(y_axis_label=axis_label, x_range=ranges2,
y_range=ranges, plot_width=width, plot_height=size)
p = Figure(toolbar_location=None, tools=[], **opts)
p.outline_line_alpha = 0
p.grid.grid_line_alpha = 0
if axis == 'x':
p.align = 'end'
p.yaxis.visible = False
axis = p.xaxis[0]
if flip:
p.above = p.below
p.below = []
p.xaxis[:] = p.above
else:
p.xaxis.visible = False
axis = p.yaxis[0]
if flip:
p.right = p.left
p.left = []
p.yaxis[:] = p.right
axis.major_label_orientation = rotation
axis.major_label_text_align = align
axis.major_label_text_baseline = 'middle'
axis.update(**axis_props)
return p
def hsv_to_rgb(hsv):
"""
Vectorized HSV to RGB conversion, adapted from:
http://stackoverflow.com/questions/24852345/hsv-to-rgb-color-conversion
"""
h, s, v = (hsv[..., i] for i in range(3))
shape = h.shape
i = np.int_(h*6.)
f = h*6.-i
q = f
t = 1.-f
i = np.ravel(i)
f = np.ravel(f)
i%=6
t = np.ravel(t)
q = np.ravel(q)
s = np.ravel(s)
v = np.ravel(v)
clist = (1-s*np.vstack([np.zeros_like(f),np.ones_like(f),q,t]))*v
#0:v 1:p 2:q 3:t
order = np.array([[0,3,1],[2,0,1],[1,0,3],[1,2,0],[3,1,0],[0,1,2]])
rgb = clist[order[i], np.arange(np.prod(shape))[:,None]]
return rgb.reshape(shape+(3,))
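# Hedged example: pure red in HSV space (h=0, s=1, v=1) maps to RGB (1, 0, 0).
def _example_hsv_to_rgb():
    hsv = np.array([[[0.0, 1.0, 1.0]]])
    return hsv_to_rgb(hsv)  # -> array([[[1., 0., 0.]]])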
def pad_width(model, table_padding=0.85, tabs_padding=1.2):
"""
Computes the width of a model and sets up appropriate padding
for Tabs and DataTable types.
"""
if isinstance(model, Row):
vals = [pad_width(child) for child in model.children]
width = np.max([v for v in vals if v is not None])
elif isinstance(model, Column):
vals = [pad_width(child) for child in model.children]
width = np.sum([v for v in vals if v is not None])
elif isinstance(model, Tabs):
vals = [pad_width(t) for t in model.tabs]
width = np.max([v for v in vals if v is not None])
for model in model.tabs:
model.width = width
width = int(tabs_padding*width)
elif isinstance(model, DataTable):
width = model.width
model.width = int(table_padding*width)
elif isinstance(model, (WidgetBox, Div)):
width = model.width
elif model:
width = model.plot_width
else:
width = 0
return width
def pad_plots(plots):
"""
Accepts a grid of bokeh plots in form of a list of lists and
wraps any DataTable or Tabs in a WidgetBox with appropriate
padding. Required to avoid overlap in gridplot.
"""
widths = []
for row in plots:
row_widths = []
for p in row:
width = pad_width(p)
row_widths.append(width)
widths.append(row_widths)
plots = [[WidgetBox(p, width=w) if isinstance(p, (DataTable, Tabs)) else p
for p, w in zip(row, ws)] for row, ws in zip(plots, widths)]
return plots
def filter_toolboxes(plots):
"""
Filters out toolboxes out of a list of plots to be able to compose
them into a larger plot.
"""
if isinstance(plots, list):
plots = [filter_toolboxes(plot) for plot in plots]
elif hasattr(plots, 'children'):
plots.children = [filter_toolboxes(child) for child in plots.children
if not isinstance(child, ToolbarBox)]
return plots
def py2js_tickformatter(formatter, msg=''):
"""
Uses py2js to compile a python tick formatter to JS code
"""
try:
from pscript import py2js
except ImportError:
param.main.param.warning(
msg+'Ensure pscript is installed ("conda install pscript" '
'or "pip install pscript")')
return
try:
jscode = py2js(formatter, 'formatter')
except Exception as e:
error = 'Pyscript raised an error: {0}'.format(e)
error = error.replace('%', '%%')
param.main.param.warning(msg+error)
return
args = inspect.getfullargspec(formatter).args
arg_define = 'var %s = tick;' % args[0] if args else ''
return_js = 'return formatter();\n'
jsfunc = '\n'.join([arg_define, jscode, return_js])
    match = re.search(r'(formatter \= function flx_formatter \(.*\))', jsfunc)
return jsfunc[:match.start()] + 'formatter = function ()' + jsfunc[match.end():]
def get_tab_title(key, frame, overlay):
"""
Computes a title for bokeh tabs from the key in the overlay, the
element and the containing (Nd)Overlay.
"""
if isinstance(overlay, Overlay):
if frame is not None:
title = []
if frame.label:
title.append(frame.label)
if frame.group != frame.param.objects('existing')['group'].default:
title.append(frame.group)
else:
title.append(frame.group)
else:
title = key
title = ' '.join(title)
else:
title = ' | '.join([d.pprint_value_string(k) for d, k in
zip(overlay.kdims, key)])
return title
def get_default(model, name, theme=None):
"""
Looks up the default value for a bokeh model property.
"""
overrides = None
if theme is not None:
if isinstance(theme, str):
theme = built_in_themes[theme]
overrides = theme._for_class(model)
descriptor = model.lookup(name)
return descriptor.property.themed_default(model, name, overrides)
def filter_batched_data(data, mapping):
"""
Iterates over the data and mapping for a ColumnDataSource and
    replaces columns with repeating values with a scalar. This is
    purely an optimization for scalar types.
"""
for k, v in list(mapping.items()):
if isinstance(v, dict) and 'field' in v:
if 'transform' in v:
continue
v = v['field']
elif not isinstance(v, str):
continue
values = data[v]
try:
if len(unique_array(values)) == 1:
mapping[k] = values[0]
del data[v]
except:
pass
def cds_column_replace(source, data):
"""
Determine if the CDS.data requires a full replacement or simply
needs to be updated. A replacement is required if untouched
columns are not the same length as the columns being updated.
"""
current_length = [len(v) for v in source.data.values()
if isinstance(v, (list,)+arraylike_types)]
new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))]
untouched = [k for k in source.data if k not in data]
return bool(untouched and current_length and new_length and current_length[0] != new_length[0])
@contextmanager
def hold_policy(document, policy, server=False):
"""
    Context manager to temporarily override the hold policy.
"""
if bokeh_version >= '2.4':
old_policy = document.callbacks.hold_value
document.callbacks._hold = policy
else:
old_policy = document._hold
document._hold = policy
try:
yield
finally:
if server and not old_policy:
document.unhold()
elif bokeh_version >= '2.4':
document.callbacks._hold = old_policy
else:
document._hold = old_policy
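# Hedged example: temporarily combine document events while adding a batch of
# models, restoring the previous hold policy afterwards.
def _example_hold_policy():
    from bokeh.document import Document
    doc = Document()
    with hold_policy(doc, 'combine'):
        doc.add_root(Figure())
    return doc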
def recursive_model_update(model, props):
"""
Recursively updates attributes on a model including other
models. If the type of the new model matches the old model
properties are simply updated, otherwise the model is replaced.
"""
updates = {}
valid_properties = model.properties_with_values()
for k, v in props.items():
if isinstance(v, Model):
nested_model = getattr(model, k)
if type(v) is type(nested_model):
nested_props = v.properties_with_values(include_defaults=False)
recursive_model_update(nested_model, nested_props)
else:
try:
setattr(model, k, v)
except Exception as e:
if isinstance(v, dict) and 'value' in v:
setattr(model, k, v['value'])
else:
raise e
elif k in valid_properties and v != valid_properties[k]:
if isinstance(v, dict) and 'value' in v:
v = v['value']
updates[k] = v
model.update(**updates)
def update_shared_sources(f):
"""
    Decorator that ensures data sources shared between multiple
    plots are cleared and updated appropriately, avoiding warnings and
    allowing empty frames on subplots. Expects a list of
    shared_sources and a mapping of the expected columns for
    each source in the plot's handles.
"""
def wrapper(self, *args, **kwargs):
source_cols = self.handles.get('source_cols', {})
shared_sources = self.handles.get('shared_sources', [])
doc = self.document
for source in shared_sources:
source.data.clear()
if doc:
event_obj = doc.callbacks if bokeh_version >= '2.4' else doc
event_obj._held_events = event_obj._held_events[:-1]
ret = f(self, *args, **kwargs)
for source in shared_sources:
expected = source_cols[id(source)]
found = [c for c in expected if c in source.data]
        empty = np.full_like(source.data[found[0]], np.nan) if found else []
patch = {c: empty for c in expected if c not in source.data}
source.data.update(patch)
return ret
return wrapper
def categorize_array(array, dim):
"""
Uses a Dimension instance to convert an array of values to categorical
(i.e. string) values and applies escaping for colons, which bokeh
treats as a categorical suffix.
"""
return np.array([dim.pprint_value(x) for x in array])
class periodic(object):
"""
Mocks the API of periodic Thread in hv.core.util, allowing a smooth
API transition on bokeh server.
"""
def __init__(self, document):
self.document = document
self.callback = None
self.period = None
self.count = None
self.counter = None
self._start_time = None
self.timeout = None
self._pcb = None
@property
def completed(self):
return self.counter is None
def start(self):
self._start_time = time.time()
if self.document is None:
            raise RuntimeError('periodic was registered to be run on bokeh '
                               'server but no document was found.')
self._pcb = self.document.add_periodic_callback(self._periodic_callback, self.period)
def __call__(self, period, count, callback, timeout=None, block=False):
if isinstance(count, int):
if count < 0: raise ValueError('Count value must be positive')
        elif count is not None:
raise ValueError('Count value must be a positive integer or None')
self.callback = callback
self.period = period*1000.
self.timeout = timeout
self.count = count
self.counter = 0
return self
def _periodic_callback(self):
self.callback(self.counter)
self.counter += 1
if self.timeout is not None:
dt = (time.time() - self._start_time)
if dt > self.timeout:
self.stop()
if self.counter == self.count:
self.stop()
def stop(self):
self.counter = None
self.timeout = None
try:
self.document.remove_periodic_callback(self._pcb)
except ValueError: # Already stopped
pass
self._pcb = None
def __repr__(self):
return 'periodic(%s, %s, %s)' % (self.period,
self.count,
callable_name(self.callback))
def __str__(self):
return repr(self)
def attach_periodic(plot):
"""
Attaches plot refresh to all streams on the object.
"""
def append_refresh(dmap):
for dmap in get_nested_dmaps(dmap):
dmap.periodic._periodic_util = periodic(plot.document)
return plot.hmap.traverse(append_refresh, [DynamicMap])
def date_to_integer(date):
"""Converts support date types to milliseconds since epoch
Attempts highest precision conversion of different datetime
formats to milliseconds since the epoch (1970-01-01 00:00:00).
If datetime is a cftime with a non-standard calendar the
caveats described in hv.core.util.cftime_to_timestamp apply.
Args:
date: Date- or datetime-like object
Returns:
Milliseconds since 1970-01-01 00:00:00
"""
if pd and isinstance(date, pd.Timestamp):
try:
date = date.to_datetime64()
except:
date = date.to_datetime()
if isinstance(date, np.datetime64):
return date.astype('datetime64[ms]').astype(float)
elif isinstance(date, cftime_types):
return cftime_to_timestamp(date, 'ms')
if hasattr(date, 'timetuple'):
dt_int = calendar.timegm(date.timetuple())*1000
else:
raise ValueError('Datetime type not recognized')
return dt_int
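# Hedged examples: both datetime and datetime64 inputs resolve to milliseconds
# since the epoch, e.g. 1970-01-02 corresponds to 86400000 ms.
def _example_date_to_integer():
    return (date_to_integer(dt.datetime(1970, 1, 2)),
            date_to_integer(np.datetime64('1970-01-02')))  # -> (86400000, 86400000.0)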
def glyph_order(keys, draw_order=[]):
"""
Orders a set of glyph handles using regular sort and an explicit
sort order. The explicit draw order must take the form of a list
of glyph names while the keys should be glyph names with a custom
suffix. The draw order may only match subset of the keys and any
matched items will take precedence over other entries.
"""
keys = sorted(keys)
def order_fn(glyph):
matches = [item for item in draw_order if glyph.startswith(item)]
return ((draw_order.index(matches[0]), glyph) if matches else
(1e9+keys.index(glyph), glyph))
return sorted(keys, key=order_fn)
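# Hedged example: keys matching the explicit draw order come first, the rest
# stay in plain sorted order.
def _example_glyph_order():
    keys = ['scatter_1', 'line_1', 'annotation_1']
    return glyph_order(keys, draw_order=['scatter'])
    # -> ['scatter_1', 'annotation_1', 'line_1']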
def colormesh(X, Y):
"""
Generates line paths for a quadmesh given 2D arrays of X and Y
coordinates.
"""
X1 = X[0:-1, 0:-1].ravel()
Y1 = Y[0:-1, 0:-1].ravel()
X2 = X[1:, 0:-1].ravel()
Y2 = Y[1:, 0:-1].ravel()
X3 = X[1:, 1:].ravel()
Y3 = Y[1:, 1:].ravel()
X4 = X[0:-1, 1:].ravel()
Y4 = Y[0:-1, 1:].ravel()
X = np.column_stack([X1, X2, X3, X4, X1])
Y = np.column_stack([Y1, Y2, Y3, Y4, Y1])
return X, Y
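# Hedged example: a 2x2 grid of corner coordinates yields a single closed
# quad, i.e. five points per path.
def _example_colormesh():
    X, Y = np.meshgrid([0, 1], [0, 1])
    paths_x, paths_y = colormesh(X, Y)
    return paths_x.shape, paths_y.shape  # -> ((1, 5), (1, 5))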
def theme_attr_json(theme, attr):
if isinstance(theme, str) and theme in built_in_themes:
return built_in_themes[theme]._json['attrs'].get(attr, {})
elif isinstance(theme, Theme):
return theme._json['attrs'].get(attr, {})
else:
return {}
def multi_polygons_data(element):
"""
Expands polygon data which contains holes to a bokeh multi_polygons
representation. Multi-polygons split by nans are expanded and the
correct list of holes is assigned to each sub-polygon.
"""
xs, ys = (element.dimension_values(kd, expanded=False) for kd in element.kdims)
holes = element.holes()
xsh, ysh = [], []
for x, y, multi_hole in zip(xs, ys, holes):
xhs = [[h[:, 0] for h in hole] for hole in multi_hole]
yhs = [[h[:, 1] for h in hole] for hole in multi_hole]
array = np.column_stack([x, y])
splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0]
arrays = np.split(array, splits+1) if len(splits) else [array]
multi_xs, multi_ys = [], []
for i, (path, hx, hy) in enumerate(zip(arrays, xhs, yhs)):
if i != (len(arrays)-1):
path = path[:-1]
multi_xs.append([path[:, 0]]+hx)
multi_ys.append([path[:, 1]]+hy)
xsh.append(multi_xs)
ysh.append(multi_ys)
return xsh, ysh
def match_dim_specs(specs1, specs2):
"""Matches dimension specs used to link axes.
Axis dimension specs consists of a list of tuples corresponding
to each dimension, each tuple spec has the form (name, label, unit).
The name and label must match exactly while the unit only has to
match if both specs define one.
"""
if (specs1 is None or specs2 is None) or (len(specs1) != len(specs2)):
return False
for spec1, spec2 in zip(specs1, specs2):
for s1, s2 in zip(spec1, spec2):
if s1 is None or s2 is None:
continue
if s1 != s2:
return False
return True
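# Hedged example: specs match when names and labels agree; units are only
# compared when both sides define one.
def _example_match_dim_specs():
    return match_dim_specs([('x', 'x', None)], [('x', 'x', 'm')])  # -> True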
def match_ax_type(ax, range_type):
"""
Ensure the range_type matches the axis model being matched.
"""
if isinstance(ax[0], CategoricalAxis):
return range_type == 'categorical'
elif isinstance(ax[0], DatetimeAxis):
return range_type == 'datetime'
else:
return range_type in ('auto', 'log')
def wrap_formatter(formatter, axis):
"""
Wraps formatting function or string in
appropriate bokeh formatter type.
"""
if isinstance(formatter, TickFormatter):
pass
elif isinstance(formatter, FunctionType):
msg = ('%sformatter could not be '
'converted to tick formatter. ' % axis)
jsfunc = py2js_tickformatter(formatter, msg)
if jsfunc:
formatter = FuncTickFormatter(code=jsfunc)
else:
formatter = None
else:
formatter = PrintfTickFormatter(format=formatter)
return formatter
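# Hedged example: a plain printf-style string is wrapped in a
# PrintfTickFormatter, while TickFormatter instances pass through unchanged.
def _example_wrap_formatter():
    return wrap_formatter('%.2f', 'x')  # -> PrintfTickFormatter(format='%.2f')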
from __future__ import print_function
import os
import sys
import math
import numpy as np
import flopy.modflow as mf
import flopy.mt3d as mt3
import flopy.seawat as swt
import flopy.utils as fu
import matplotlib.pyplot as plt
# --modify default matplotlib settings
updates = {'font.family':['Univers 57 Condensed', 'Arial'],
'mathtext.default':'regular',
'pdf.compression':0,
'pdf.fonttype':42,
'legend.fontsize':7,
'axes.labelsize':8,
'xtick.labelsize':7,
'ytick.labelsize':7}
plt.rcParams.update(updates)
cleanFiles = False
skipRuns = False
fext = 'png'
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == '-clean':
cleanFiles = True
elif basearg == '-skipruns':
skipRuns = True
elif basearg == '-pdf':
fext = 'pdf'
dirs = [os.path.join('SWI2'), os.path.join('SEAWAT')]
if cleanFiles:
print('cleaning all files')
print('excluding *.py files')
file_dict = {}
file_dict['.'] = os.listdir('.')
file_dict[dirs[0]] = os.listdir(dirs[0])
file_dict[dirs[1]] = os.listdir(dirs[1])
for key, files in list(file_dict.items()):
for f in files:
if os.path.isdir(f):
continue
if '.py' != os.path.splitext(f)[1].lower():
print(' removing...{}'.format(os.path.basename(f)))
os.remove(os.path.join(key, f))
for d in dirs:
if os.path.exists(d):
os.rmdir(d)
sys.exit(1)
# make working directories
for d in dirs:
if not os.path.exists(d):
os.mkdir(d)
modelname = 'swiex2'
mf_name = 'mf2005'
# problem data
nper = 1
perlen = 2000
nstp = 1000
nlay, nrow, ncol = 1, 1, 60
delr = 5.
nsurf = 2
x = np.arange(0.5 * delr, ncol * delr, delr)
xedge = np.linspace(0, float(ncol) * delr, len(x) + 1)
ibound = np.ones((nrow, ncol), int)
ibound[0, 0] = -1
# swi2 data
z0 = np.zeros((nlay, nrow, ncol), float)
z1 = np.zeros((nlay, nrow, ncol), float)
z0[0, 0, 30:38] = np.arange(-2.5, -40, -5)
z0[0, 0, 38:] = -40
z1[0, 0, 22:30] = np.arange(-2.5, -40, -5)
z1[0, 0, 30:] = -40
z = []
z.append(z0)
z.append(z1)
ssz = 0.2
isource = np.ones((nrow, ncol), 'int')
isource[0, 0] = 2
# stratified model
modelname = 'swiex2_strat'
print('creating...', modelname)
ml = mf.Modflow(modelname, version='mf2005', exe_name=mf_name, model_ws=dirs[0])
discret = mf.ModflowDis(ml, nlay=1, ncol=ncol, nrow=nrow, delr=delr, delc=1, top=0, botm=[-40.0],
nper=nper, perlen=perlen, nstp=nstp)
bas = mf.ModflowBas(ml, ibound=ibound, strt=0.05)
bcf = mf.ModflowBcf(ml, laycon=0, tran=2 * 40)
swi = mf.ModflowSwi2(ml, nsrf=nsurf, istrat=1, toeslope=0.2, tipslope=0.2, nu=[0, 0.0125, 0.025],
zeta=z, ssz=ssz, isource=isource, nsolver=1)
oc = mf.ModflowOc88(ml, save_head_every=1000)
pcg = mf.ModflowPcg(ml)
ml.write_input()
# run stratified model
if not skipRuns:
m = ml.run_model(silent=False)
# read stratified results
zetafile = os.path.join(dirs[0], '{}.zta'.format(modelname))
zobj = fu.CellBudgetFile(zetafile)
zkstpkper = zobj.get_kstpkper()
zeta = zobj.get_data(kstpkper=zkstpkper[-1], text=' ZETASRF 1')[0]
zeta2 = zobj.get_data(kstpkper=zkstpkper[-1], text=' ZETASRF 2')[0]
#
# vd model
modelname = 'swiex2_vd'
print('creating...', modelname)
ml = mf.Modflow(modelname, version='mf2005', exe_name=mf_name, model_ws=dirs[0])
discret = mf.ModflowDis(ml, nlay=1, ncol=ncol, nrow=nrow, delr=delr, delc=1, top=0, botm=[-40.0],
nper=nper, perlen=perlen, nstp=nstp)
bas = mf.ModflowBas(ml, ibound=ibound, strt=0.05)
bcf = mf.ModflowBcf(ml, laycon=0, tran=2 * 40)
swi = mf.ModflowSwi2(ml, nsrf=nsurf, istrat=0, toeslope=0.2, tipslope=0.2, nu=[0, 0, 0.025, 0.025],
zeta=z, ssz=ssz, isource=isource, nsolver=1)
oc = mf.ModflowOc88(ml, save_head_every=1000)
pcg = mf.ModflowPcg(ml)
ml.write_input()
# run vd model
if not skipRuns:
m = ml.run_model(silent=False)
# read vd model data
zetafile = os.path.join(dirs[0], '{}.zta'.format(modelname))
zobj = fu.CellBudgetFile(zetafile)
zkstpkper = zobj.get_kstpkper()
zetavd = zobj.get_data(kstpkper=zkstpkper[-1], text=' ZETASRF 1')[0]
zetavd2 = zobj.get_data(kstpkper=zkstpkper[-1], text=' ZETASRF 2')[0]
#
# seawat model
swtexe_name = 'swt_v4'
modelname = 'swiex2_swt'
print('creating...', modelname)
swt_xmax = 300.0
swt_zmax = 40.0
swt_delr = 1.0
swt_delc = 1.0
swt_delz = 0.5
swt_ncol = int(swt_xmax / swt_delr) #300
swt_nrow = 1
swt_nlay = int(swt_zmax / swt_delz) #80
print(swt_nlay, swt_nrow, swt_ncol)
swt_ibound = np.ones((swt_nlay, swt_nrow, swt_ncol), int)
#swt_ibound[0, swt_ncol-1, 0] = -1
swt_ibound[0, 0, 0] = -1
swt_x = np.arange(0.5 * swt_delr, swt_ncol * swt_delr, swt_delr)
swt_xedge = np.linspace(0, float(ncol) * delr, len(swt_x) + 1)
swt_top = 0.
z0 = swt_top
swt_botm = np.zeros((swt_nlay), float)
swt_z = np.zeros((swt_nlay), float)
zcell = -swt_delz / 2.0
for ilay in range(0, swt_nlay):
z0 -= swt_delz
swt_botm[ilay] = z0
swt_z[ilay] = zcell
zcell -= swt_delz
#swt_X, swt_Z = np.meshgrid(swt_x, swt_botm)
swt_X, swt_Z = np.meshgrid(swt_x, swt_z)
# mt3d
# mt3d boundary array set to all active
icbund = np.ones((swt_nlay, swt_nrow, swt_ncol), int)
# create initial concentrations for MT3D
sconc = np.ones((swt_nlay, swt_nrow, swt_ncol), float)
sconcp = np.zeros((swt_nlay, swt_ncol), float)
xsb = 110
xbf = 150
for ilay in range(0, swt_nlay):
for icol in range(0, swt_ncol):
if swt_x[icol] > xsb:
sconc[ilay, 0, icol] = 0.5
if swt_x[icol] > xbf:
sconc[ilay, 0, icol] = 0.0
for icol in range(0, swt_ncol):
sconcp[ilay, icol] = sconc[ilay, 0, icol]
xsb += swt_delz
xbf += swt_delz
# ssm data
itype = mt3.Mt3dSsm.itype_dict()
ssm_data = {0: [0, 0, 0, 35., itype['BAS6']]}
#print sconcp
#mt3d print times
timprs = (np.arange(5) + 1) * 2000.
nprs = len(timprs)
# MODFLOW files
ml = []
ml = mf.Modflow(modelname, version='mf2005', exe_name=swtexe_name, model_ws=dirs[1])
discret = mf.ModflowDis(ml, nrow=swt_nrow, ncol=swt_ncol, nlay=swt_nlay,
delr=swt_delr, delc=swt_delc, laycbd=0, top=swt_top, botm=swt_botm,
nper=nper, perlen=perlen, nstp=1, steady=False)
bas = mf.ModflowBas(ml, ibound=swt_ibound, strt=0.05)
lpf = mf.ModflowLpf(ml, hk=2.0, vka=2.0, ss=0.0, sy=0.0, laytyp=0, layavg=0)
oc = mf.ModflowOc88(ml, save_head_every=1, item2=[[0, 1, 0, 0]])
pcg = mf.ModflowPcg(ml)
ml.write_input()
# Create the basic MT3DMS model structure
mt = mt3.Mt3dms(modelname, 'nam_mt3dms', modflowmodel=ml,
model_ws=dirs[1]) # Coupled to modflow model 'mf'
adv = mt3.Mt3dAdv(mt, mixelm=-1, #-1 is TVD
percel=0.05,
nadvfd=0, #0 or 1 is upstream; 2 is central in space
#particle based methods
nplane=4,
mxpart=1e7,
itrack=2,
dceps=1e-4,
npl=16,
nph=16,
npmin=8,
npmax=256)
btn = mt3.Mt3dBtn(mt, icbund=1, prsity=ssz, sconc=sconc, ifmtcn=-1,
chkmas=False, nprobs=10, nprmas=10, dt0=0.0, ttsmult=1.2, ttsmax=100.0,
ncomp=1, nprs=nprs, timprs=timprs, mxstrn=1e8)
dsp = mt3.Mt3dDsp(mt, al=0., trpt=1., trpv=1., dmcoef=0.)
gcg = mt3.Mt3dGcg(mt, mxiter=1, iter1=50, isolve=3, cclose=1e-6, iprgcg=5)
ssm = mt3.Mt3dSsm(mt, stress_period_data=ssm_data)
mt.write_input()
# Create the SEAWAT model structure
mswtf = swt.Seawat(modelname, 'nam_swt', modflowmodel=ml, mt3dmodel=mt,
exe_name=swtexe_name, model_ws=dirs[1]) # Coupled to modflow model mf and mt3dms model mt
vdf = swt.SeawatVdf(mswtf, nswtcpl=1, iwtable=0, densemin=0, densemax=0, denseref=1000., denseslp=25., firstdt=1.0e-03)
mswtf.write_input()
# run seawat model
if not skipRuns:
m = mswtf.run_model(silent=False)
# read seawat model data
ucnfile = os.path.join(dirs[1], 'MT3D001.UCN')
uobj = fu.UcnFile(ucnfile)
times = uobj.get_times()
print(times)
ukstpkper = uobj.get_kstpkper()
print(ukstpkper)
c = uobj.get_data(totim=times[-1])
conc = np.zeros((swt_nlay, swt_ncol), float)
for icol in range(0, swt_ncol):
for ilay in range(0, swt_nlay):
conc[ilay, icol] = c[ilay, 0, icol]
#
# figure
fwid = 7.0 #6.50
fhgt = 4.5 #6.75
flft = 0.125
frgt = 0.95
fbot = 0.125
ftop = 0.925
print('creating cross-section figure...')
xsf = plt.figure(figsize=(fwid, fhgt), facecolor='w')
xsf.subplots_adjust(wspace=0.25, hspace=0.25, left=flft, right=frgt, bottom=fbot, top=ftop)
# plot initial conditions
ax = xsf.add_subplot(3, 1, 1)
ax.text(-0.075, 1.05, 'A', transform=ax.transAxes, va='center', ha='center', size='8')
#text(.975, .1, '(a)', transform = ax.transAxes, va = 'center', ha = 'center')
ax.plot([110, 150], [0, -40], 'k')
ax.plot([150, 190], [0, -40], 'k')
ax.set_xlim(0, 300)
ax.set_ylim(-40, 0)
ax.set_yticks(np.arange(-40, 1, 10))
ax.text(50, -20, 'salt', va='center', ha='center')
ax.text(150, -20, 'brackish', va='center', ha='center')
ax.text(250, -20, 'fresh', va='center', ha='center')
ax.set_ylabel('Elevation, in meters')
# plot stratified swi2 and seawat results
ax = xsf.add_subplot(3, 1, 2)
ax.text(-0.075, 1.05, 'B', transform=ax.transAxes, va='center', ha='center', size='8')
#
zp = zeta[0, 0, :]
p = (zp < 0.0) & (zp > -40.)
ax.plot(x[p], zp[p], 'b', linewidth=1.5, drawstyle='steps-mid')
zp = zeta2[0, 0, :]
p = (zp < 0.0) & (zp > -40.)
ax.plot(x[p], zp[p], 'b', linewidth=1.5, drawstyle='steps-mid')
# seawat data
cc = ax.contour(swt_X, swt_Z, conc, levels=[0.25, 0.75], colors='k', linestyles='solid', linewidths=0.75, zorder=101)
# fake figures
ax.plot([-100., -100], [-100., -100], 'b', linewidth=1.5, label='SWI2')
ax.plot([-100., -100], [-100., -100], 'k', linewidth=0.75, label='SEAWAT')
# legend
leg = ax.legend(loc='lower left', numpoints=1)
leg.set_frame_on(False)
# axes
ax.set_xlim(0, 300)
ax.set_ylim(-40, 0)
ax.set_yticks(np.arange(-40, 1, 10))
ax.set_ylabel('Elevation, in meters')
# plot vd model
ax = xsf.add_subplot(3, 1, 3)
ax.text(-0.075, 1.05, 'C', transform=ax.transAxes, va='center', ha='center', size='8')
dr = zeta[0, 0, :]
ax.plot(x, dr, 'b', linewidth=1.5, drawstyle='steps-mid')
dr = zeta2[0, 0, :]
ax.plot(x, dr, 'b', linewidth=1.5, drawstyle='steps-mid')
dr = zetavd[0, 0, :]
ax.plot(x, dr, 'r', linewidth=0.75, drawstyle='steps-mid')
dr = zetavd2[0, 0, :]
ax.plot(x, dr, 'r', linewidth=0.75, drawstyle='steps-mid')
# fake figures
ax.plot([-100., -100], [-100., -100], 'b', linewidth=1.5, label='SWI2 stratified option')
ax.plot([-100., -100], [-100., -100], 'r', linewidth=0.75, label='SWI2 continuous option')
# legend
leg = ax.legend(loc='lower left', numpoints=1)
leg.set_frame_on(False)
# axes
ax.set_xlim(0, 300)
ax.set_ylim(-40, 0)
ax.set_yticks(np.arange(-40, 1, 10))
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
outfig = 'Figure07_swi2ex2.{0}'.format(fext)
xsf.savefig(outfig, dpi=300)
print('created...', outfig)
from common_fixtures import * # NOQA
from cattle import ApiError
import yaml
RESOURCE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/certs')
@pytest.fixture(scope='module')
def image_uuid(context):
return context.image_uuid
def test_create_env_and_svc(client, image_uuid):
env = _create_stack(client)
launch_config = {"imageUuid": image_uuid}
# create service
service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
assert service.launchConfig.healthCheck is not None
def test_activate_lb_svc(super_client, context, client, image_uuid):
context.host
env = _create_stack(client)
launch_config = {"imageUuid": image_uuid,
"ports": [8082, '910:1001']}
svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = client.wait_success(svc.activate(), 120)
# perform validation
svc = _validate_lb_svc_activate(env, svc, client,
['8082:8082', '910:1001'])
_validate_svc_instance_map_count(client, svc, "active", 1)
def test_deactivate_then_activate_lb_svc(super_client, new_context):
client = new_context.client
host1, host2, service, env = _activate_svc_w_scale_two(new_context,
random_str())
# 1. verify that all hosts mappings are created
_validate_svc_instance_map_count(client, service, "active", 2)
# 2. deactivate service and validate that
# the instance mappings are still around
service = client.wait_success(service.deactivate())
_validate_svc_instance_map_count(client, service, "active", 2)
# 3. activate service again
service = client.wait_success(service.activate())
assert service.state == 'active'
_validate_svc_instance_map_count(client, service, "active", 2)
def test_deactivate_then_remove_lb_svc(new_context):
client = new_context.client
host1, host2, service, env = _activate_svc_w_scale_two(new_context,
random_str())
# 1. verify that all instances are created
_validate_svc_instance_map_count(client, service, "active", 2)
# 2. deactivate service and validate that
# instances mappings are still around
# and lb still present
service = client.wait_success(service.deactivate())
_validate_svc_instance_map_count(client, service, "active", 2)
# remove service and verify that the lb is gone
client.wait_success(service.remove())
def test_remove_active_lb_svc(new_context):
client = new_context.client
host1, host2, service, env = _activate_svc_w_scale_two(new_context,
random_str())
# 1. verify that instances got created
_validate_svc_instance_map_count(client, service, "active", 2)
# 2. delete service and validate that the instance mappings are gone
client.wait_success(service.remove())
_validate_svc_instance_map_count(client, service, "active", 0)
def test_targets(super_client, client, context):
env = _create_stack(client)
# create web, db lb services
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_service = client. \
create_service(name=random_str() + "web",
environmentId=env.id,
launchConfig=launch_config)
web_service = client.wait_success(web_service)
db_service = client. \
create_service(name=random_str() + "db",
environmentId=env.id,
launchConfig=launch_config)
db_service = client.wait_success(db_service)
lb_launch_config = {"imageUuid": image_uuid,
"ports": [8081, '909:1001']}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
lb_svc = client.wait_success(lb_svc.activate(), 120)
_validate_lb_svc_activate(env,
lb_svc, client, ['8081:8081', '909:1001'])
# map web service to lb service - early binding,
# before web service is activated
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_svc = lb_svc.addservicelink(serviceLink=service_link)
maps = _validate_svc_instance_map_count(client, lb_svc, "active", 1)
lb_instance = _wait_for_instance_start(super_client, maps[0].instanceId)
agent_id = lb_instance.agentId
item_before = _get_config_item(super_client, agent_id)
# activate web service
web_service = client.wait_success(web_service.activate(), 120)
assert web_service.state == "active"
db_service = client.wait_success(db_service.activate(), 120)
assert db_service.state == "active"
_validate_config_item_update(super_client, item_before, agent_id)
# bind db and lb services after service is activated
item_before = _get_config_item(super_client, agent_id)
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_svc.addservicelink(serviceLink=service_link)
_validate_config_item_update(super_client, item_before, agent_id)
_validate_add_service_link(client, lb_svc, db_service,
ports=["a.com:90"])
_validate_add_service_link(client, lb_svc, web_service,
ports=["a.com:90"])
# remove link and make sure that the target map is gone
item_before = _get_config_item(super_client, agent_id)
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_svc.removeservicelink(serviceLink=service_link)
_validate_config_item_update(super_client, item_before, agent_id)
def test_restart_stack(client, context):
env = _create_stack(client)
# create lb and web services
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_service = client. \
create_service(name=random_str() + "web",
environmentId=env.id,
launchConfig=launch_config)
web_service = client.wait_success(web_service)
lb_launch_config = {"imageUuid": image_uuid,
"ports": [8051, '808:1001']}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
# map web service to lb service
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_svc = lb_svc.addservicelink(serviceLink=service_link)
env = client.wait_success(env.activateservices(), 120)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == 'active'
    web_svc = client.wait_success(web_service)
assert web_svc.state == 'active'
env = client.wait_success(env.deactivateservices())
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == 'inactive'
web_svc = client.wait_success(web_svc)
assert web_svc.state == 'inactive'
env = client.wait_success(env.activateservices(), 120)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == 'active'
    web_svc = client.wait_success(web_svc)
assert web_svc.state == 'active'
def test_internal_lb(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
lb_launch_config = {"imageUuid": image_uuid,
"expose": [8051]}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
lb_svc = client.wait_success(lb_svc.activate())
assert lb_svc.state == 'active'
def _validate_config_item_update(super_client, bf, agent_id):
wait_for(
lambda: find_one(super_client.list_config_item_status,
agentId=agent_id,
name='haproxy').requestedVersion > bf.requestedVersion
)
def _get_config_item(super_client, agent_id):
return find_one(super_client.list_config_item_status,
agentId=agent_id,
name='haproxy')
def test_target_ips(super_client, client, context):
host = context.host
user_account_id = host.accountId
env = _create_stack(client)
# create web, db lb services
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_ips = ["72.22.16.5", '72.22.16.6']
web_service = client. \
create_externalService(name=random_str() + "web",
environmentId=env.id,
launchConfig=launch_config,
externalIpAddresses=web_ips)
web_service = client.wait_success(web_service)
db_ips = ["192.168.0.9", '192.168.0.10']
db_service = client. \
create_externalService(name=random_str() + "db",
environmentId=env.id,
launchConfig=launch_config,
externalIpAddresses=db_ips)
db_service = client.wait_success(db_service)
lb_launch_config = {"imageUuid": image_uuid,
"ports": [1010, '111:111']}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config,
accountId=user_account_id)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
# map web service to lb service - early binding,
# before services are activated
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_svc = lb_svc.addservicelink(serviceLink=service_link)
# activate web and lb services
lb_svc = client.wait_success(lb_svc.activate(), 120)
_validate_lb_svc_activate(env,
lb_svc, client, ['1010:1010', '111:111'])
web_service = client.wait_success(web_service.activate(), 120)
assert web_service.state == "active"
db_service = client.wait_success(db_service.activate(), 120)
assert db_service.state == "active"
maps = _validate_svc_instance_map_count(client, lb_svc, "active", 1)
lb_instance = _wait_for_instance_start(super_client, maps[0].instanceId)
agent_id = lb_instance.agentId
item_before = _get_config_item(super_client, agent_id)
# bind db and lb services after service is activated
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_svc.addservicelink(serviceLink=service_link)
_validate_config_item_update(super_client, item_before, agent_id)
# remove link and make sure that the db targets are gone
item_before = _get_config_item(super_client, agent_id)
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_svc.removeservicelink(serviceLink=service_link)
_validate_config_item_update(super_client, item_before, agent_id)
# remove web service and validate that the web targets are gone
item_before = _get_config_item(super_client, agent_id)
client.wait_success(web_service.remove())
_validate_config_item_update(super_client, item_before, agent_id)
def test_create_svc_with_lb_config(context, client):
name = random_str()
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
app_policy = {"name": "policy1", "cookie": "cookie1",
"maxLength": 4, "prefix": "true",
"requestLearn": "false", "timeout": 10,
"mode": "query_string"}
lb_policy = {"name": "policy2", "cookie": "cookie1",
"domain": ".test.com", "indirect": "true",
"nocache": "true", "postonly": "true",
"mode": "insert"}
lb_config = {"name": name,
"appCookieStickinessPolicy": app_policy,
"lbCookieStickinessPolicy": lb_policy}
# create service
service = client. \
create_loadBalancerService(name=name,
environmentId=env.id,
launchConfig=launch_config,
loadBalancerConfig=lb_config)
service = client.wait_success(service)
assert service.state == "inactive"
assert service.loadBalancerConfig is not None
# verify the load balancer config info
config = service.loadBalancerConfig
assert config.appCookieStickinessPolicy is not None
assert config.appCookieStickinessPolicy.name == "policy1"
assert config.appCookieStickinessPolicy.cookie == "cookie1"
assert config.appCookieStickinessPolicy.maxLength == 4
assert config.appCookieStickinessPolicy.prefix is True
assert config.appCookieStickinessPolicy.requestLearn is False
assert config.appCookieStickinessPolicy.timeout == 10
assert config.appCookieStickinessPolicy.mode == "query_string"
assert config.lbCookieStickinessPolicy is not None
assert config.lbCookieStickinessPolicy.name == "policy2"
assert config.lbCookieStickinessPolicy.cookie == "cookie1"
assert config.lbCookieStickinessPolicy.domain == ".test.com"
assert config.lbCookieStickinessPolicy.indirect is True
assert config.lbCookieStickinessPolicy.nocache is True
assert config.lbCookieStickinessPolicy.postonly is True
assert config.lbCookieStickinessPolicy.mode == "insert"
def test_scale(new_context):
client = new_context.client
register_simulated_host(new_context)
env = _create_stack(client)
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid,
"ports": [8081, '909:1001']}
cert1 = _create_cert(client)
cert2 = _create_cert(client)
service = client.create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
defaultCertificateId=cert1.id)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# validate that one instance map was created
_validate_svc_instance_map_count(client, service, "active", 1)
# scale up
service = client.update(service, scale=2, name=service.name,
defaultCertificateId=cert2.id)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == 2
_validate_svc_instance_map_count(client, service, "active", 2)
instance_service_map = client \
.list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 2
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
wait_for_condition(
client, instance_service_map[1], _resource_is_active,
lambda x: 'State is: ' + x.state)
# now scale down
service = client.update(service, scale=0, name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == 0
_validate_svc_instance_map_count(client, service, "active", 0)
def test_labels(super_client, client, context):
env = _create_stack(client)
# create lb_svc with labels, and validate all of them
# plus lb_svc label were set
service_name = random_str()
initial_labels = {'affinity': "container==B", '!affinity': "container==C"}
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"ports": [8010, '913:913'], "labels": initial_labels}
lb_svc = client. \
create_loadBalancerService(name=service_name,
environmentId=env.id,
launchConfig=launch_config)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
lb_svc = client.wait_success(lb_svc.activate(), 120)
lb_svc = _validate_lb_svc_activate(env, lb_svc,
client, ['8010:8010', '913:913'])
maps = _validate_svc_instance_map_count(client, lb_svc, "active", 1)
lb_instance = _wait_for_instance_start(super_client, maps[0].instanceId)
result_labels = {'affinity': "container==B", '!affinity': "container==C",
'io.rancher.stack_service.name':
env.name + "/" + service_name}
assert all(item in lb_instance.labels.items()
for item in result_labels.items()) is True
# create lb_svc w/o labels, and validate that
# only one lb_svc label was set
service_name = random_str()
launch_config = {"imageUuid": image_uuid,
"ports": [8089, '914:914']}
lb_svc = client. \
create_loadBalancerService(name=service_name,
environmentId=env.id,
launchConfig=launch_config)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
lb_svc = client.wait_success(lb_svc.activate(), 120)
lb_svc = _validate_lb_svc_activate(env, lb_svc,
client, ['8089:8089', '914:914'])
maps = _validate_svc_instance_map_count(client, lb_svc, "active", 1)
lb_instance = _wait_for_instance_start(super_client, maps[0].instanceId)
name = env.name + '/' + service_name
result_labels = {'io.rancher.stack_service.name': name}
assert all(item in lb_instance.labels.items()
for item in result_labels.items()) is True
def test_inactive_lb(super_client, client, context):
env = _create_stack(client)
# create and activate web service
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_service = client. \
create_service(name=random_str() + "web",
environmentId=env.id,
launchConfig=launch_config)
web_service = client.wait_success(web_service)
web_service = client.wait_success(web_service.activate(), 120)
assert web_service.state == "active"
web_instances = client. \
list_container(name=env.name + "_" + web_service.name + "_" + "1")
assert len(web_instances) == 1
# create lb service, but don't activate
lb_launch_config = {"imageUuid": image_uuid,
"ports": [1000]}
lb_service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
# map web service to lb service; validate no lb targets were created
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_service = lb_service.addservicelink(serviceLink=service_link)
# activate lb service
lb_service = client.wait_success(lb_service.activate(), 120)
assert lb_service.state == "active"
maps = _validate_svc_instance_map_count(client, lb_service, "active", 1)
lb_instance = _wait_for_instance_start(super_client, maps[0].instanceId)
agent_id = lb_instance.agentId
item_before = _get_config_item(super_client, agent_id)
# deactivate lb service, and remove service link
lb_service = client.wait_success(lb_service.deactivate(), 120)
assert lb_service.state == "inactive"
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_service = lb_service.removeservicelink(serviceLink=service_link)
lb_service = client.wait_success(lb_service.activate(), 120)
assert lb_service.state == "active"
_validate_config_item_update(super_client, item_before, agent_id)
def test_destroy_svc_instance(super_client, context, client, image_uuid):
env = _create_stack(client)
launch_config = {"imageUuid": image_uuid,
"ports": [95, '94:94']}
service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
# perform validation
service = _validate_lb_svc_activate(env, service,
client, ['94:94', '95:95'])
maps = _validate_svc_instance_map_count(client, service, "active", 1)
instance = _wait_for_instance_start(super_client, maps[0].instanceId)
client.wait_success(client.delete(instance))
_validate_svc_instance_map_count(client, service, "active", 0)
client.wait_success(service)
_validate_svc_instance_map_count(client, service, "active", 1)
def test_set_service_links(client, context):
env1 = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
lb_service = client.create_loadBalancerService(name="lb",
environmentId=env1.id,
launchConfig=launch_config)
lb_service = client.wait_success(lb_service)
service2 = client.create_service(name=random_str(),
environmentId=env1.id,
launchConfig=launch_config)
service2 = client.wait_success(service2)
service3 = client.create_service(name=random_str(),
environmentId=env1.id,
launchConfig=launch_config)
service3 = client.wait_success(service3)
# set service2, service3 links for lb service
service_link1 = {"serviceId": service2.id, "ports": ["a.com:90"]}
service_link2 = {"serviceId": service3.id, "ports": ["a.com:90"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link1, service_link2])
_validate_add_service_link(client, lb_service, service2,
ports=["a.com:90"])
_validate_add_service_link(client, lb_service, service3,
ports=["a.com:90"])
# update the link with new ports
service_link1 = {"serviceId": service2.id, "ports": ["a.com:100"]}
service_link2 = {"serviceId": service3.id, "ports": ["a.com:101"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link1, service_link2])
_validate_add_service_link(client, lb_service, service2,
ports=["a.com:100"])
_validate_add_service_link(client, lb_service, service3,
ports=["a.com:101"])
# remove link for service3 from the list of links
service_link = {"serviceId": service2.id, "ports": ["a.com:100"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link])
_validate_remove_service_link(client, lb_service, service3, 1)
# try to set duplicated service links
with pytest.raises(ApiError) as e:
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link, service_link])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
assert e.value.error.fieldName == 'serviceId'
# set empty service link set
lb_service = lb_service.setservicelinks(serviceLinks=[])
_validate_remove_service_link(client, lb_service, service2, 1)
def test_modify_link(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
lb_service = client.create_loadBalancerService(name="lb",
environmentId=env.id,
launchConfig=launch_config)
lb_service = client.wait_success(lb_service)
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
# set service link with hostname 1
service_link = {"serviceId": service.id, "ports": ["a.com:90"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link])
_validate_add_service_link(client, lb_service, service, ports=["a.com:90"])
# update the link with new ports
service_link = {"serviceId": service.id, "ports": ["b.com:100"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link])
_validate_add_service_link(client, lb_service,
service, ports=["b.com:100"])
def _create_service(client, env, launch_config, name=None):
if name:
svc_name = name
else:
svc_name = random_str()
service1 = client.create_service(name=svc_name,
environmentId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
return service1
def test_create_links(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
lb_service = client.create_loadBalancerService(name="lb",
environmentId=env.id,
launchConfig=launch_config)
lb_service = client.wait_success(lb_service)
service1 = _create_service(client, env, launch_config)
service2 = _create_service(client, env, launch_config)
service3 = _create_service(client, env, launch_config)
service4 = _create_service(client, env, launch_config)
service5 = _create_service(client, env, launch_config)
service6 = _create_service(client, env, launch_config)
service7 = _create_service(client, env, launch_config)
service8 = _create_service(client, env, launch_config)
service9 = _create_service(client, env, launch_config)
service10 = _create_service(client, env, launch_config)
service11 = _create_service(client, env, launch_config)
service12 = _create_service(client, env, launch_config)
service13 = _create_service(client, env, launch_config)
service14 = _create_service(client, env, launch_config)
service15 = _create_service(client, env, launch_config)
service16 = _create_service(client, env, launch_config)
# set service link with hostname 1
port1 = "example.com:80/path=81"
port2 = "example.com"
port3 = "example.com:80"
port4 = "example.com:80/path"
port5 = "example.com:80=81"
port6 = "example.com/path"
port7 = "example.com/path=81"
port8 = "example.com=81"
port9 = "80/path"
port10 = "80/path=81"
port11 = "80=81"
port12 = "/path"
port13 = "/path=81"
port14 = "81"
port15 = "example.com/path1/path2/path3=81"
# old style
port16 = "90:a.com/hello"
service_link1 = {"serviceId": service1.id, "ports": [port1]}
service_link2 = {"serviceId": service2.id, "ports": [port2]}
service_link3 = {"serviceId": service3.id, "ports": [port3]}
service_link4 = {"serviceId": service4.id, "ports": [port4]}
service_link5 = {"serviceId": service5.id, "ports": [port5]}
service_link6 = {"serviceId": service6.id, "ports": [port6]}
service_link7 = {"serviceId": service7.id, "ports": [port7]}
service_link8 = {"serviceId": service8.id, "ports": [port8]}
service_link9 = {"serviceId": service9.id, "ports": [port9]}
service_link10 = {"serviceId": service10.id, "ports": [port10]}
service_link11 = {"serviceId": service11.id, "ports": [port11]}
service_link12 = {"serviceId": service12.id, "ports": [port12]}
service_link13 = {"serviceId": service13.id, "ports": [port13]}
service_link14 = {"serviceId": service14.id, "ports": [port14]}
service_link15 = {"serviceId": service15.id, "ports": [port15]}
service_link16 = {"serviceId": service16.id, "ports": [port16]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link1, service_link2,
service_link3, service_link4,
service_link5, service_link6,
service_link7, service_link8,
service_link9, service_link10,
service_link11, service_link12,
service_link13, service_link14,
service_link15, service_link16])
_validate_add_service_link(client, lb_service, service1, ports=[port1])
_validate_add_service_link(client, lb_service, service2, ports=[port2])
_validate_add_service_link(client, lb_service, service3, ports=[port3])
_validate_add_service_link(client, lb_service, service4, ports=[port4])
_validate_add_service_link(client, lb_service, service5, ports=[port5])
_validate_add_service_link(client, lb_service, service6, ports=[port6])
_validate_add_service_link(client, lb_service, service7, ports=[port7])
_validate_add_service_link(client, lb_service, service8, ports=[port8])
_validate_add_service_link(client, lb_service, service9, ports=[port9])
_validate_add_service_link(client, lb_service, service10, ports=[port10])
_validate_add_service_link(client, lb_service, service11, ports=[port11])
_validate_add_service_link(client, lb_service, service12, ports=[port12])
_validate_add_service_link(client, lb_service, service13, ports=[port13])
_validate_add_service_link(client, lb_service, service14, ports=[port14])
_validate_add_service_link(client, lb_service, service15, ports=[port15])
_validate_add_service_link(client, lb_service, service16, ports=[port16])
service_link1 = {"serviceId": service1.id, "ports": ["90=100=100"]}
with pytest.raises(ApiError) as e:
lb_service. \
setservicelinks(serviceLinks=[service_link1])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidPort'
service_link1 = {"serviceId": service1.id, "ports": ["a.com:b.com:80"]}
with pytest.raises(ApiError) as e:
lb_service. \
setservicelinks(serviceLinks=[service_link1])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidPort'
def test_export_config(client, context):
env1 = _create_stack(client)
env2 = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_service = _create_service(client, env1, launch_config, "web")
web_service1 = _create_service(client, env1, launch_config, "web1")
web_external = _create_service(client, env2, launch_config, "web2")
lb_launch_config = {"imageUuid": image_uuid,
"ports": [8081, '909:1001']}
lb_service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env1.id,
launchConfig=lb_launch_config)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
# map web services
service_link = {"serviceId": web_service.id,
"ports": ["a.com:90"], "name": "test"}
service_link1 = {"serviceId": web_service1.id}
service_link_ext = {"serviceId": web_external.id, "ports": ["a.com:90"]}
lb_service = lb_service.addservicelink(serviceLink=service_link)
lb_service = lb_service.addservicelink(serviceLink=service_link1)
lb_service = lb_service.addservicelink(serviceLink=service_link_ext)
compose_config = env1.exportconfig()
assert compose_config is not None
    document = yaml.safe_load(compose_config.dockerComposeConfig)
assert len(document[lb_service.name]['links']) == 2
assert len(document[lb_service.name]['external_links']) == 1
assert len(document[lb_service.name]['labels']) == 2
labels = {"io.rancher.loadbalancer.target.web": "a.com:90",
"io.rancher.loadbalancer.target."
+ env2.name + "/web2": "a.com:90"}
links = ["web:web", "web1:web1"]
external_links = [env2.name + "/web2:web2"]
assert document[lb_service.name]['labels'] == labels
assert document[lb_service.name]['links'] == links
assert document[lb_service.name]['external_links'] == external_links
def test_lb_service_w_certificate(client, context, image_uuid):
env = _create_stack(client)
cert1 = _create_cert(client)
cert2 = _create_cert(client)
labels = {'io.rancher.loadbalancer.ssl.ports': "1765,1767"}
launch_config = {"imageUuid": image_uuid,
"ports": ['1765:1766', '1767:1768'],
"labels": labels}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
certificateIds=[cert1.id, cert2.id],
defaultCertificateId=cert1.id)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
lb_svc = client.wait_success(lb_svc.activate(), 120)
# perform validation
lb_svc = _validate_lb_svc_activate(env, lb_svc, client,
['1765:1766', '1767:1768'], "https")
assert lb_svc.defaultCertificateId == cert1.id
assert lb_svc.certificateIds == [cert1.id, cert2.id]
# remove the lb_svc
lb_svc = client.wait_success(lb_svc.remove())
assert lb_svc.state == 'removed'
# remove the cert
cert1 = client.wait_success(cert1.remove())
assert cert1.state == 'removed'
cert2 = client.wait_success(cert2.remove())
assert cert2.state == 'removed'
def test_lb_service_update_certificate(client, context, image_uuid):
cert1 = _create_cert(client)
cert2 = _create_cert(client)
labels = {'io.rancher.loadbalancer.ssl.ports': "1769,1771"}
env = _create_stack(client)
launch_config = {"imageUuid": image_uuid,
"ports": ['1769:1770', '1771:1772'],
"labels": labels}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
certificateIds=[cert1.id, cert2.id],
defaultCertificateId=cert1.id)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
lb_svc = client.wait_success(lb_svc.activate(), 120)
# perform validation
lb_svc = _validate_lb_svc_activate(env, lb_svc,
client, ['1769:1770', '1771:1772'],
"https")
assert lb_svc.defaultCertificateId == cert1.id
assert lb_svc.certificateIds == [cert1.id, cert2.id]
cert3 = _create_cert(client)
# update lb_svc with new certificate set
lb_svc = client.update(lb_svc, certificateIds=[cert1.id],
defaultCertificateId=cert3.id)
lb_svc = client.wait_success(lb_svc, 120)
assert lb_svc.defaultCertificateId == cert3.id
assert lb_svc.certificateIds == [cert1.id]
compose_config = env.exportconfig()
assert compose_config is not None
    docker_compose = yaml.safe_load(compose_config.dockerComposeConfig)
    rancher_compose = yaml.safe_load(compose_config.rancherComposeConfig)
assert docker_compose[lb_svc.name]['labels'] == labels
assert rancher_compose[lb_svc.name]['default_cert'] == cert3.name
assert rancher_compose[lb_svc.name]['certs'][0] == cert1.name
# don't pass certificate ids and validate that they are still set
lb_svc = client.update(lb_svc, name='newName')
lb_svc = client.wait_success(lb_svc, 120)
assert lb_svc.defaultCertificateId == cert3.id
assert lb_svc.certificateIds == [cert1.id]
# swap default and optional
lb_svc = client.update(lb_svc, certificateIds=[cert3.id],
defaultCertificateId=cert1.id)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.defaultCertificateId == cert1.id
assert lb_svc.certificateIds == [cert3.id]
# update with none certificates
lb_svc = client.update(lb_svc, certificateIds=None,
defaultCertificateId=None)
lb_svc = client.wait_success(lb_svc, 120)
assert lb_svc.defaultCertificateId is None
assert lb_svc.certificateIds is None
def test_lb_with_certs_service_update(new_context, image_uuid):
client = new_context.client
new_context.host
register_simulated_host(new_context)
cert1 = _create_cert(client)
cert2 = _create_cert(client)
labels = {'io.rancher.loadbalancer.ssl.ports': "1772,1773"}
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": image_uuid,
"ports": ['1792', '1793'],
"labels": labels}
service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
certificateIds=[cert1.id, cert2.id],
defaultCertificateId=cert1.id,
scale=2)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
assert service.defaultCertificateId == cert1.id
assert service.certificateIds == [cert1.id, cert2.id]
# scale down service and validate the certificates are still the same
service = client.update(service, scale=1)
service = client.wait_success(service)
assert service.state == 'active'
assert service.defaultCertificateId == cert1.id
assert service.certificateIds == [cert1.id, cert2.id]
def test_cert_in_use(client, context, image_uuid):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
cert1 = _create_cert(client)
cert2 = _create_cert(client)
labels = {'io.rancher.loadbalancer.ssl.ports': "1765,1767"}
launch_config = {"imageUuid": image_uuid,
"ports": ['1765:1766', '1767:1768'],
"labels": labels}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
certificateIds=[cert1.id, cert2.id],
defaultCertificateId=cert1.id)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.state == "inactive"
lb_svc = client.wait_success(lb_svc.activate(), 120)
# perform validation
lb_svc = _validate_lb_svc_activate(env, lb_svc,
client, ['1765:1766', '1767:1768'],
"https")
assert lb_svc.defaultCertificateId == cert1.id
assert lb_svc.certificateIds == [cert1.id, cert2.id]
# try to remove the cert - delete action (used by UI)
with pytest.raises(ApiError) as e:
client.delete(cert1)
assert e.value.error.status == 405
assert e.value.error.code == 'InvalidAction'
# try to remove the cert - remove action
with pytest.raises(ApiError) as e:
cert1.remove()
assert e.value.error.status == 405
assert e.value.error.code == 'InvalidAction'
def test_concurrent_activate_setlinks(client, context):
env1 = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = _create_service(client, env1, launch_config, "web")
lb_launch_config = {"imageUuid": image_uuid,
"ports": [8777, '8778:8778']}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
environmentId=env1.id,
launchConfig=lb_launch_config)
lb_svc = client.wait_success(lb_svc)
svc.activate()
lb_svc.activate()
# map web services
service_link = {"serviceId": svc.id,
"ports": ["a.com:90"], "name": "test"}
lb_svc.addservicelink(serviceLink=service_link)
lb_svc = client.wait_success(lb_svc)
svc = client.wait_success(svc)
# validate that the instance was created
_validate_svc_instance_map_count(client, lb_svc, "active", 1)
# update the link
service_link = {"serviceId": svc.id,
"ports": ["a.com:100"], "name": "test"}
lb_svc.addservicelink(serviceLink=service_link)
_validate_svc_instance_map_count(client, lb_svc, "active", 1)
def _resource_is_active(resource):
return resource.state == 'active'
def _resource_is_removed(resource):
return resource.state == 'removed'
def _validate_lb_svc_activate(env, service, client, ports, protocol=None):
# 1. verify that the service was activated
assert service.state == "active"
# 2. verify instance got created
_validate_svc_instance_map_count(client,
service, "active", service.scale)
return service
def _activate_svc_w_scale_two(new_context, random_str):
client = new_context.client
host1 = new_context.host
host2 = register_simulated_host(new_context)
env = _create_stack(client)
launch_config = {"imageUuid": new_context.image_uuid,
"ports": [8081, '909:1001']}
service = client. \
create_loadBalancerService(name=random_str,
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
return host1, host2, service, env
def _validate_add_service_link(client, service, consumedService, ports=None):
service_maps = client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id)
assert len(service_maps) == 1
    if ports:
        service_map = None
        for value in service_maps:
            if value.ports == ports:
                service_map = value
                break
        assert service_map is not None
def _validate_remove_service_link(client, service, consumedService, count,
timeout=30):
start = time.time()
service_maps = client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id,
state='removed')
while len(service_maps) != count:
time.sleep(.5)
service_maps = client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id,
state='removed')
if time.time() - start > timeout:
            assert False, 'Timeout waiting for map to be removed.'
def _create_cert(client):
cert = _read_cert("cert.pem")
key = _read_cert("key.pem")
cert1 = client. \
create_certificate(name=random_str(),
cert=cert,
key=key)
cert1 = client.wait_success(cert1)
assert cert1.state == 'active'
assert cert1.cert == cert
return cert1
def _read_cert(name):
with open(os.path.join(RESOURCE_DIR, name)) as f:
return f.read()
def _validate_svc_instance_map_count(client, service,
state, count, timeout=30):
start = time.time()
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id, state=state)
while len(instance_service_map) < count:
time.sleep(.5)
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id, state=state)
if time.time() - start > timeout:
            assert False, 'Timeout waiting for map to be in correct state'
return instance_service_map
def _wait_for_instance_start(super_client, id):
wait_for(
lambda: len(super_client.by_id('container', id)) > 0
)
return super_client.by_id('container', id)
def _create_stack(client):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
return env
|
|
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Image cache manager.
The cache manager implements the specification at
http://wiki.openstack.org/nova-image-cache-management.
"""
import hashlib
import os
import re
import time
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.openstack.common import fileutils
from nova import utils
from nova.virt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
imagecache_opts = [
cfg.StrOpt('image_info_filename_pattern',
default='$instances_path/$image_cache_subdirectory_name/'
'%(image)s.info',
help='Allows image information files to be stored in '
'non-standard locations'),
cfg.BoolOpt('remove_unused_kernels',
default=True,
deprecated_for_removal=True,
help='DEPRECATED: Should unused kernel images be removed? '
'This is only safe to enable if all compute nodes have '
'been updated to support this option (running Grizzly or '
'newer level compute). This will be the default behavior '
'in the 13.0.0 release.'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
'removed'),
cfg.BoolOpt('checksum_base_images',
default=False,
help='Write a checksum for files in _base to disk'),
cfg.IntOpt('checksum_interval_seconds',
default=3600,
help='How frequently to checksum base images'),
]
CONF = cfg.CONF
CONF.register_opts(imagecache_opts, 'libvirt')
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
def get_cache_fname(images, key):
"""Return a filename based on the SHA1 hash of a given image ID.
Image files stored in the _base directory that match this pattern
are considered for cleanup by the image cache manager. The cache
manager considers the file to be in use if it matches an instance's
image_ref, kernel_id or ramdisk_id property.
However, in grizzly-3 and before, only the image_ref property was
considered. This means that it's unsafe to store kernel and ramdisk
images using this pattern until we're sure that all compute nodes
are running a cache manager newer than grizzly-3. For now, we
require admins to confirm that by setting the remove_unused_kernels
boolean but, at some point in the future, we'll be safely able to
assume this.
"""
image_id = str(images[key])
if ((not CONF.libvirt.remove_unused_kernels and
key in ['kernel_id', 'ramdisk_id'])):
return image_id
else:
return hashlib.sha1(image_id).hexdigest()
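# Illustrative note (not part of the original module): with
# remove_unused_kernels enabled, every id -- image_ref, kernel_id and
# ramdisk_id alike -- is cached under the hex SHA1 of its id string, e.g.
#
#     images = {'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}
#     get_cache_fname(images, 'image_ref')   # -> 40 hex chars, the _base name
#
# The image id above is invented purely for illustration; only the 40-hex
# naming convention matters to the cache manager.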
def get_info_filename(base_path):
"""Construct a filename for storing additional information about a base
image.
Returns a filename.
"""
base_file = os.path.basename(base_path)
return (CONF.libvirt.image_info_filename_pattern
% {'image': base_file})
def is_valid_info_file(path):
"""Test if a given path matches the pattern for info files."""
digest_size = hashlib.sha1().digestsize * 2
regexp = (CONF.libvirt.image_info_filename_pattern
% {'image': ('([0-9a-f]{%(digest_size)d}|'
'[0-9a-f]{%(digest_size)d}_sm|'
'[0-9a-f]{%(digest_size)d}_[0-9]+)'
% {'digest_size': digest_size})})
m = re.match(regexp, path)
if m:
return True
return False
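# With the default image_info_filename_pattern this accepts info files living
# next to the cached images, typically:
#
#     <instances_path>/_base/<40-hex-sha1>.info           original base image
#     <instances_path>/_base/<40-hex-sha1>_sm.info        legacy "small" image
#     <instances_path>/_base/<40-hex-sha1>_<bytes>.info   resized base image
#
# where <bytes> is the resized image size (see the note in _get_base below).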
def _read_possible_json(serialized, info_file):
try:
d = jsonutils.loads(serialized)
except ValueError as e:
LOG.error(_LE('Error reading image info file %(filename)s: '
'%(error)s'),
{'filename': info_file,
'error': e})
d = {}
return d
def read_stored_info(target, field=None, timestamped=False):
"""Read information about an image.
Returns an empty dictionary if there is no info, just the field value if
a field is requested, or the entire dictionary otherwise.
"""
info_file = get_info_filename(target)
if not os.path.exists(info_file):
# NOTE(mikal): Special case to handle essex checksums being converted.
# There is an assumption here that target is a base image filename.
old_filename = target + '.sha1'
if field == 'sha1' and os.path.exists(old_filename):
hash_file = open(old_filename)
hash_value = hash_file.read()
hash_file.close()
write_stored_info(target, field=field, value=hash_value)
os.remove(old_filename)
d = {field: hash_value}
else:
d = {}
else:
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def read_file(info_file):
LOG.debug('Reading image info file: %s', info_file)
with open(info_file, 'r') as f:
return f.read().rstrip()
serialized = read_file(info_file)
d = _read_possible_json(serialized, info_file)
if field:
if timestamped:
return (d.get(field, None), d.get('%s-timestamp' % field, None))
else:
return d.get(field, None)
return d
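# Typical call (see read_stored_checksum below): read_stored_info(base_file,
# field='sha1', timestamped=True) returns a (checksum, timestamp) tuple, or
# (None, None) when nothing has been recorded for that base file yet.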
def write_stored_info(target, field=None, value=None):
"""Write information about an image."""
if not field:
return
info_file = get_info_filename(target)
LOG.info(_LI('Writing stored info to %s'), info_file)
fileutils.ensure_tree(os.path.dirname(info_file))
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def write_file(info_file, field, value):
d = {}
if os.path.exists(info_file):
with open(info_file, 'r') as f:
d = _read_possible_json(f.read(), info_file)
d[field] = value
d['%s-timestamp' % field] = time.time()
with open(info_file, 'w') as f:
f.write(jsonutils.dumps(d))
write_file(info_file, field, value)
def _hash_file(filename):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
with open(filename) as f:
for chunk in iter(lambda: f.read(32768), b''):
checksum.update(chunk)
return checksum.hexdigest()
def read_stored_checksum(target, timestamped=True):
"""Read the checksum.
Returns the checksum (as hex) or None.
"""
return read_stored_info(target, field='sha1', timestamped=timestamped)
def write_stored_checksum(target):
"""Write a checksum to disk for a file in _base."""
write_stored_info(target, field='sha1', value=_hash_file(target))
class ImageCacheManager(imagecache.ImageCacheManager):
def __init__(self):
super(ImageCacheManager, self).__init__()
self.lock_path = os.path.join(CONF.instances_path, 'locks')
self._reset_state()
def _reset_state(self):
"""Reset state variables used for each pass."""
self.used_images = {}
self.image_popularity = {}
self.instance_names = set()
self.back_swap_images = set()
self.used_swap_images = set()
self.active_base_files = []
self.corrupt_base_files = []
self.originals = []
self.removable_base_files = []
self.unexplained_images = []
def _store_image(self, base_dir, ent, original=False):
"""Store a base image for later examination."""
entpath = os.path.join(base_dir, ent)
if os.path.isfile(entpath):
self.unexplained_images.append(entpath)
if original:
self.originals.append(entpath)
def _store_swap_image(self, ent):
"""Store base swap images for later examination."""
names = ent.split('_')
if len(names) == 2 and names[0] == 'swap':
if len(names[1]) > 0 and names[1].isdigit():
LOG.debug('Adding %s into backend swap images', ent)
self.back_swap_images.add(ent)
def _list_base_images(self, base_dir):
"""Return a list of the images present in _base.
Determine what images we have on disk. There will be other files in
this directory so we only grab the ones which are the right length
to be disk images.
"""
digest_size = hashlib.sha1().digestsize * 2
for ent in os.listdir(base_dir):
if len(ent) == digest_size:
self._store_image(base_dir, ent, original=True)
elif (len(ent) > digest_size + 2 and
ent[digest_size] == '_' and
not is_valid_info_file(os.path.join(base_dir, ent))):
self._store_image(base_dir, ent, original=False)
else:
self._store_swap_image(ent)
return {'unexplained_images': self.unexplained_images,
'originals': self.originals}
def _list_backing_images(self):
"""List the backing images currently in use."""
inuse_images = []
for ent in os.listdir(CONF.instances_path):
if ent in self.instance_names:
LOG.debug('%s is a valid instance name', ent)
disk_path = os.path.join(CONF.instances_path, ent, 'disk')
if os.path.exists(disk_path):
LOG.debug('%s has a disk file', ent)
try:
backing_file = libvirt_utils.get_disk_backing_file(
disk_path)
except processutils.ProcessExecutionError:
# (for bug 1261442)
if not os.path.exists(disk_path):
LOG.debug('Failed to get disk backing file: %s',
disk_path)
continue
else:
raise
LOG.debug('Instance %(instance)s is backed by '
'%(backing)s',
{'instance': ent,
'backing': backing_file})
if backing_file:
backing_path = os.path.join(
CONF.instances_path,
CONF.image_cache_subdirectory_name,
backing_file)
if backing_path not in inuse_images:
inuse_images.append(backing_path)
if backing_path in self.unexplained_images:
LOG.warn(_LW('Instance %(instance)s is using a '
'backing file %(backing)s which '
'does not appear in the image '
'service'),
{'instance': ent,
'backing': backing_file})
self.unexplained_images.remove(backing_path)
return inuse_images
def _find_base_file(self, base_dir, fingerprint):
"""Find the base file matching this fingerprint.
Yields the name of the base file, a boolean which is True if the image
is "small", and a boolean which indicates if this is a resized image.
Note that it is possible for more than one yield to result from this
check.
If no base file is found, then nothing is yielded.
"""
# The original file from glance
base_file = os.path.join(base_dir, fingerprint)
if os.path.exists(base_file):
yield base_file, False, False
# An older naming style which can be removed sometime after Folsom
base_file = os.path.join(base_dir, fingerprint + '_sm')
if os.path.exists(base_file):
yield base_file, True, False
# Resized images
resize_re = re.compile('.*/%s_[0-9]+$' % fingerprint)
for img in self.unexplained_images:
m = resize_re.match(img)
if m:
yield img, False, True
def _verify_checksum(self, img_id, base_file, create_if_missing=True):
"""Compare the checksum stored on disk with the current file.
Note that if the checksum fails to verify this is logged, but no actual
action occurs. This is something sysadmins should monitor for and
handle manually when it occurs.
"""
if not CONF.libvirt.checksum_base_images:
return None
lock_name = 'hash-%s' % os.path.split(base_file)[-1]
# Protect against other nova-computes performing checksums at the same
# time if we are using shared storage
@utils.synchronized(lock_name, external=True, lock_path=self.lock_path)
def inner_verify_checksum():
(stored_checksum, stored_timestamp) = read_stored_checksum(
base_file, timestamped=True)
if stored_checksum:
# NOTE(mikal): Checksums are timestamped. If we have recently
# checksummed (possibly on another compute node if we are using
# shared storage), then we don't need to checksum again.
if (stored_timestamp and
time.time() - stored_timestamp <
CONF.libvirt.checksum_interval_seconds):
return True
# NOTE(mikal): If there is no timestamp, then the checksum was
# performed by a previous version of the code.
if not stored_timestamp:
write_stored_info(base_file, field='sha1',
value=stored_checksum)
current_checksum = _hash_file(base_file)
if current_checksum != stored_checksum:
LOG.error(_LE('image %(id)s at (%(base_file)s): image '
'verification failed'),
{'id': img_id,
'base_file': base_file})
return False
else:
return True
else:
LOG.info(_LI('image %(id)s at (%(base_file)s): image '
'verification skipped, no hash stored'),
{'id': img_id,
'base_file': base_file})
# NOTE(mikal): If the checksum file is missing, then we should
# create one. We don't create checksums when we download images
# from glance because that would delay VM startup.
if CONF.libvirt.checksum_base_images and create_if_missing:
LOG.info(_LI('%(id)s (%(base_file)s): generating '
'checksum'),
{'id': img_id,
'base_file': base_file})
write_stored_checksum(base_file)
return None
return inner_verify_checksum()
@staticmethod
def _get_age_of_file(base_file):
if not os.path.exists(base_file):
LOG.debug('Cannot remove %s, it does not exist', base_file)
return (False, 0)
mtime = os.path.getmtime(base_file)
age = time.time() - mtime
return (True, age)
def _remove_old_enough_file(self, base_file, maxage, remove_sig=True,
remove_lock=True):
"""Remove a single swap or base file if it is old enough."""
exists, age = self._get_age_of_file(base_file)
if not exists:
return
if age < maxage:
LOG.info(_LI('Base or swap file too young to remove: %s'),
base_file)
else:
LOG.info(_LI('Removing base or swap file: %s'), base_file)
try:
os.remove(base_file)
if remove_sig:
signature = get_info_filename(base_file)
if os.path.exists(signature):
os.remove(signature)
except OSError as e:
LOG.error(_LE('Failed to remove %(base_file)s, '
'error was %(error)s'),
{'base_file': base_file,
'error': e})
if remove_lock:
try:
# NOTE(jichenjc) The lock file will be constructed first
# time the image file was accessed. the lock file looks
# like nova-9e881789030568a317fad9daae82c5b1c65e0d4a
# or nova-03d8e206-6500-4d91-b47d-ee74897f9b4e
# according to the original file name
lock_file = os.path.split(base_file)[-1]
lockutils.remove_external_lock_file(lock_file,
lock_file_prefix='nova-', lock_path=self.lock_path)
except OSError as e:
LOG.debug('Failed to remove %(lock_file)s, '
'error was %(error)s',
{'lock_file': lock_file,
'error': e})
def _remove_swap_file(self, base_file):
"""Remove a single swap base file if it is old enough."""
maxage = CONF.remove_unused_original_minimum_age_seconds
self._remove_old_enough_file(base_file, maxage, remove_sig=False,
remove_lock=False)
def _remove_base_file(self, base_file):
"""Remove a single base file if it is old enough."""
maxage = CONF.libvirt.remove_unused_resized_minimum_age_seconds
if base_file in self.originals:
maxage = CONF.remove_unused_original_minimum_age_seconds
self._remove_old_enough_file(base_file, maxage)
def _handle_base_image(self, img_id, base_file):
"""Handle the checks for a single base image."""
image_bad = False
image_in_use = False
LOG.info(_LI('image %(id)s at (%(base_file)s): checking'),
{'id': img_id,
'base_file': base_file})
if base_file in self.unexplained_images:
self.unexplained_images.remove(base_file)
if (base_file and os.path.exists(base_file)
and os.path.isfile(base_file)):
# _verify_checksum returns True if the checksum is ok, and None if
# there is no checksum file
checksum_result = self._verify_checksum(img_id, base_file)
if checksum_result is not None:
image_bad = not checksum_result
# Give other threads a chance to run
time.sleep(0)
if img_id in self.used_images:
local, remote, instances = self.used_images[img_id]
if local > 0 or remote > 0:
image_in_use = True
LOG.info(_LI('image %(id)s at (%(base_file)s): '
'in use: on this node %(local)d local, '
'%(remote)d on other nodes sharing this instance '
'storage'),
{'id': img_id,
'base_file': base_file,
'local': local,
'remote': remote})
self.active_base_files.append(base_file)
if not base_file:
LOG.warn(_LW('image %(id)s at (%(base_file)s): warning '
'-- an absent base file is in use! '
'instances: %(instance_list)s'),
{'id': img_id,
'base_file': base_file,
'instance_list': ' '.join(instances)})
if image_bad:
self.corrupt_base_files.append(base_file)
if base_file:
if not image_in_use:
LOG.debug('image %(id)s at (%(base_file)s): image is not in '
'use',
{'id': img_id,
'base_file': base_file})
self.removable_base_files.append(base_file)
else:
LOG.debug('image %(id)s at (%(base_file)s): image is in '
'use',
{'id': img_id,
'base_file': base_file})
if os.path.exists(base_file):
libvirt_utils.chown(base_file, os.getuid())
os.utime(base_file, None)
def _age_and_verify_swap_images(self, context, base_dir):
LOG.debug('Verify swap images')
for ent in self.back_swap_images:
base_file = os.path.join(base_dir, ent)
if ent in self.used_swap_images and os.path.exists(base_file):
libvirt_utils.chown(base_file, os.getuid())
os.utime(base_file, None)
elif self.remove_unused_base_images:
self._remove_swap_file(base_file)
error_images = self.used_swap_images - self.back_swap_images
for error_image in error_images:
LOG.warn(_LW('%s swap image was used by instance'
' but no back files existing!'), error_image)
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
LOG.debug('Verify base images')
# Determine what images are on disk because they're in use
for img in self.used_images:
fingerprint = hashlib.sha1(img).hexdigest()
LOG.debug('Image id %(id)s yields fingerprint %(fingerprint)s',
{'id': img,
'fingerprint': fingerprint})
for result in self._find_base_file(base_dir, fingerprint):
base_file, image_small, image_resized = result
self._handle_base_image(img, base_file)
if not image_small and not image_resized:
self.originals.append(base_file)
# Elements remaining in unexplained_images might be in use
inuse_backing_images = self._list_backing_images()
for backing_path in inuse_backing_images:
if backing_path not in self.active_base_files:
self.active_base_files.append(backing_path)
# Anything left is an unknown base image
for img in self.unexplained_images:
LOG.warn(_LW('Unknown base file: %s'), img)
self.removable_base_files.append(img)
# Dump these lists
if self.active_base_files:
LOG.info(_LI('Active base files: %s'),
' '.join(self.active_base_files))
if self.corrupt_base_files:
LOG.info(_LI('Corrupt base files: %s'),
' '.join(self.corrupt_base_files))
if self.removable_base_files:
LOG.info(_LI('Removable base files: %s'),
' '.join(self.removable_base_files))
if self.remove_unused_base_images:
for base_file in self.removable_base_files:
self._remove_base_file(base_file)
# That's it
LOG.debug('Verification complete')
def _get_base(self):
# NOTE(mikal): The new scheme for base images is as follows -- an
# image is streamed from the image service to _base (filename is the
# sha1 hash of the image id). If CoW is enabled, that file is then
# resized to be the correct size for the instance (filename is the
# same as the original, but with an underscore and the resized size
# in bytes). This second file is then CoW'd to the instance disk. If
# CoW is disabled, the resize occurs as part of the copy from the
# cache to the instance directory. Files ending in _sm are no longer
# created, but may remain from previous versions.
base_dir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
if not os.path.exists(base_dir):
LOG.debug('Skipping verification, no base directory at %s',
base_dir)
return
return base_dir
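    # Putting the scheme above together, a populated _base directory might
    # contain entries such as (names shortened here only for readability):
    #
    #     <sha1-of-image-id>                original image streamed from glance
    #     <sha1-of-image-id>_21474836480    the same image resized for CoW use
    #     <sha1-of-image-id>_sm             legacy pre-Folsom "small" image
    #     <sha1-of-image-id>.info           checksum metadata for this module
    #     swap_4096                         shared swap image (_store_swap_image)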
def update(self, context, all_instances):
base_dir = self._get_base()
if not base_dir:
return
# reset the local statistics
self._reset_state()
# read the cached images
self._list_base_images(base_dir)
# read running instances data
running = self._list_running_instances(context, all_instances)
self.used_images = running['used_images']
self.image_popularity = running['image_popularity']
self.instance_names = running['instance_names']
self.used_swap_images = running['used_swap_images']
# perform the aging and image verification
self._age_and_verify_cached_images(context, all_instances, base_dir)
self._age_and_verify_swap_images(context, base_dir)
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import re
import pytest
from pants.core.util_rules.external_tool import (
ExternalTool,
ExternalToolError,
ExternalToolRequest,
TemplatedExternalTool,
UnknownVersion,
UnsupportedVersion,
UnsupportedVersionUsage,
)
from pants.engine.fs import DownloadFile, FileDigest
from pants.engine.platform import Platform
from pants.testutil.option_util import create_subsystem
from pants.testutil.pytest_util import no_exception
class FooBar(ExternalTool):
name = "foobar"
options_scope = "foobar"
default_version = "3.4.7"
default_known_versions = [
"3.2.0|macos_x86_64|1102324cdaacd589e50b8b7770595f220f54e18a1d76ee3c445198f80ab865b8|123346",
"3.2.0|linux_ppc |39e5d64b0f31117c94651c880d0a776159e49eab42b2066219569934b936a5e7|124443",
"3.2.0|linux_x86_64|c0c667fb679a8221bed01bffeed1f80727c6c7827d0cbd8f162195efb12df9e4|121212",
"3.4.7|macos_x86_64|9d0e18cd74b918c7b3edd0203e75569e0c8caecb1367b3be409b45e28514f5be|123321",
"3.4.7|linux_x86_64|a019dfc4b32d63c1392aa264aed2253c1e0c2fb09216f8e2cc269bbfb8bb49b5|134213",
]
def generate_url(self, plat: Platform) -> str:
if plat == Platform.macos_x86_64:
plat_str = "osx-x86_64"
elif plat == Platform.linux_x86_64:
plat_str = "linux-x86_64"
else:
raise ExternalToolError()
return f"https://foobar.org/bin/v{self.version}/foobar-{self.version}-{plat_str}.tgz"
def generate_exe(self, plat: Platform) -> str:
return f"foobar-{self.version}/bin/foobar"
class TemplatedFooBar(TemplatedExternalTool):
name = "foobar"
options_scope = "foobar"
default_version = "3.4.7"
default_known_versions = [
"3.2.0|macos_x86_64|1102324cdaacd589e50b8b7770595f220f54e18a1d76ee3c445198f80ab865b8|123346",
"3.2.0|linux_ppc |39e5d64b0f31117c94651c880d0a776159e49eab42b2066219569934b936a5e7|124443",
"3.2.0|linux_x86_64|c0c667fb679a8221bed01bffeed1f80727c6c7827d0cbd8f162195efb12df9e4|121212",
"3.4.7|macos_x86_64|9d0e18cd74b918c7b3edd0203e75569e0c8caecb1367b3be409b45e28514f5be|123321",
"3.4.7|linux_x86_64|a019dfc4b32d63c1392aa264aed2253c1e0c2fb09216f8e2cc269bbfb8bb49b5|134213",
]
default_url_template = "https://foobar.org/bin/v{version}/foobar-{version}-{platform}.tgz"
default_url_platform_mapping = {
"macos_x86_64": "osx-x86_64",
"macos_arm64": "osx-x86_64",
"linux_x86_64": "linux-x86_64",
}
def generate_exe(self, plat: Platform) -> str:
return f"foobar-{self.version}/bin/foobar"
def test_generate_request() -> None:
def do_test(
expected_url: str, expected_length: int, expected_sha256: str, plat: Platform, version: str
) -> None:
foobar = create_subsystem(
FooBar,
version=version,
known_versions=FooBar.default_known_versions,
)
templated_foobar = create_subsystem(
TemplatedFooBar,
version=version,
known_versions=TemplatedFooBar.default_known_versions,
url_template=TemplatedFooBar.default_url_template,
url_platform_mapping=TemplatedFooBar.default_url_platform_mapping,
)
expected = ExternalToolRequest(
DownloadFile(
url=expected_url, expected_digest=FileDigest(expected_sha256, expected_length)
),
f"foobar-{version}/bin/foobar",
)
assert expected == foobar.get_request(plat)
assert expected == templated_foobar.get_request(plat)
do_test(
"https://foobar.org/bin/v3.2.0/foobar-3.2.0-osx-x86_64.tgz",
123346,
"1102324cdaacd589e50b8b7770595f220f54e18a1d76ee3c445198f80ab865b8",
Platform.macos_x86_64,
"3.2.0",
)
do_test(
"https://foobar.org/bin/v3.4.7/foobar-3.4.7-linux-x86_64.tgz",
134213,
"a019dfc4b32d63c1392aa264aed2253c1e0c2fb09216f8e2cc269bbfb8bb49b5",
Platform.linux_x86_64,
"3.4.7",
)
with pytest.raises(UnknownVersion):
create_subsystem(
FooBar, version="9.9.9", known_versions=FooBar.default_known_versions
).get_request(Platform.macos_x86_64)
class ConstrainedTool(TemplatedExternalTool):
name = "foobar"
options_scope = "foobar"
version_constraints = ">3.2.1, <3.8"
default_version = "v3.4.7"
default_known_versions = [
"v3.2.0|macos_x86_64|1102324cdaacd589e50b8b7770595f220f54e18a1d76ee3c445198f80ab865b8|123346",
"v3.2.0|linux_ppc |39e5d64b0f31117c94651c880d0a776159e49eab42b2066219569934b936a5e7|124443",
"v3.2.0|linux_x86_64|c0c667fb679a8221bed01bffeed1f80727c6c7827d0cbd8f162195efb12df9e4|121212",
"v3.4.7|macos_x86_64|9d0e18cd74b918c7b3edd0203e75569e0c8caecb1367b3be409b45e28514f5be|123321",
"v3.4.7|linux_x86_64|a019dfc4b32d63c1392aa264aed2253c1e0c2fb09216f8e2cc269bbfb8bb49b5|134213",
]
default_url_template = "https://foobar.org/bin/v{version}/foobar-{version}-{platform}.tgz"
default_url_platform_mapping = {
"macos_x86_64": "osx-x86_64",
"macos_arm64": "osx-x86_64",
"linux_x86_64": "linux-x86_64",
}
def generate_exe(self, plat: Platform) -> str:
return f"foobar-{self.version}/bin/foobar"
@pytest.mark.parametrize(
"version, action, assert_expectation, expect_logged",
[
(
"v1.2.3",
UnsupportedVersionUsage.RaiseError,
pytest.raises(
UnsupportedVersion,
match=re.escape(
"The option [foobar].version is set to v1.2.3, which is not compatible with what this release of Pants expects: foobar<3.8,>3.2.1. "
"Please update the version to a supported value, or consider using a different Pants release if you cannot change the version. "
"Alternatively, update [foobar].use_unsupported_version to be 'warning'."
),
),
None,
),
(
"v3.2.2",
UnsupportedVersionUsage.RaiseError,
pytest.raises(
UnknownVersion, match="No known version of foobar v3.2.2 for macos_x86_64 found in"
),
None,
),
(
"v3.4.7",
UnsupportedVersionUsage.RaiseError,
no_exception(),
None,
),
(
"v3.8.0",
UnsupportedVersionUsage.RaiseError,
pytest.raises(
UnsupportedVersion,
match=re.escape(
"The option [foobar].version is set to v3.8.0, which is not compatible with what this release of Pants expects: foobar<3.8,>3.2.1. "
"Please update the version to a supported value, or consider using a different Pants release if you cannot change the version. "
"Alternatively, update [foobar].use_unsupported_version to be 'warning'."
),
),
None,
),
(
"v3.8.0",
UnsupportedVersionUsage.LogWarning,
pytest.raises(
UnknownVersion, match="No known version of foobar v3.8.0 for macos_x86_64 found in"
),
[
(
logging.WARNING,
(
"The option [foobar].version is set to v3.8.0, which is not compatible with what this release of Pants expects: foobar<3.8,>3.2.1. "
"Please update the version to a supported value, or consider using a different Pants release if you cannot change the version. "
"Alternatively, you can ignore this warning (at your own peril) by adding this to the GLOBAL section of pants.toml: "
'ignore_warnings = ["The option [foobar].version is set to"].'
),
)
],
),
],
)
def test_version_constraints(caplog, version, action, assert_expectation, expect_logged) -> None:
caplog.set_level(logging.DEBUG)
caplog.clear()
with assert_expectation:
create_subsystem(
ConstrainedTool,
version=version,
use_unsupported_version=action,
known_versions=ConstrainedTool.default_known_versions,
url_template=ConstrainedTool.default_url_template,
url_platform_mapping=ConstrainedTool.default_url_platform_mapping,
).get_request(Platform.macos_x86_64)
if expect_logged:
assert len(caplog.records) == len(expect_logged)
for idx, (lvl, msg) in enumerate(expect_logged):
log_record = caplog.records[idx]
assert msg in log_record.message
assert lvl == log_record.levelno
else:
assert not caplog.records
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import warnings
import IECore
import Gaffer
from . import _GafferUI
import GafferUI
import Qt
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
## The PathListingWidget displays the contents of a Path, updating the Path to represent the
# current directory as the user navigates around. It supports both a list and a tree view,
# allows customisable column listings, and supports both single and multiple selection.
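#
# A minimal usage sketch (illustrative only - it assumes a `Gaffer.FileSystemPath`
# and a running GafferUI event loop) :
#
#    path = Gaffer.FileSystemPath( "/tmp" )
#    widget = GafferUI.PathListingWidget( path, allowMultipleSelection = True )
#    widget.setExpansion( IECore.PathMatcher( [ "/tmp" ] ) )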
class PathListingWidget( GafferUI.Widget ) :
Column = _GafferUI._PathListingWidgetColumn
StandardColumn = _GafferUI._PathListingWidgetStandardColumn
IconColumn = _GafferUI._PathListingWidgetIconColumn
## A collection of handy column definitions for FileSystemPaths
defaultNameColumn = StandardColumn( "Name", "name" )
defaultFileSystemOwnerColumn = StandardColumn( "Owner", "fileSystem:owner" )
defaultFileSystemModificationTimeColumn = StandardColumn( "Modified", "fileSystem:modificationTime" )
defaultFileSystemIconColumn = _GafferUI._PathListingWidgetFileIconColumn()
defaultFileSystemColumns = (
defaultNameColumn,
defaultFileSystemOwnerColumn,
defaultFileSystemModificationTimeColumn,
defaultFileSystemIconColumn,
)
## A collection of handy column definitions for IndexedIOPaths
## \todo Perhaps these belong in GafferCortexUI?
defaultIndexedIOEntryTypeColumn = StandardColumn( "Entry Type", "indexedIO:entryType" )
defaultIndexedIODataTypeColumn = StandardColumn( "Data Type", "indexedIO:dataType" )
defaultIndexedIOArrayLengthColumn = StandardColumn( "Array Length", "indexedIO:arrayLength" )
defaultIndexedIOColumns = (
defaultNameColumn,
defaultIndexedIOEntryTypeColumn,
defaultIndexedIODataTypeColumn,
defaultIndexedIOArrayLengthColumn,
)
DisplayMode = IECore.Enum.create( "List", "Tree" )
def __init__(
self,
path,
columns = defaultFileSystemColumns,
allowMultipleSelection = False,
displayMode = DisplayMode.List,
sortable = True,
horizontalScrollMode = GafferUI.ScrollMode.Never,
**kw
) :
GafferUI.Widget.__init__( self, _TreeView(), **kw )
self._qtWidget().setAlternatingRowColors( True )
self._qtWidget().setUniformRowHeights( True )
self._qtWidget().setEditTriggers( QtWidgets.QTreeView.NoEditTriggers )
self._qtWidget().activated.connect( Gaffer.WeakMethod( self.__activated ) )
self._qtWidget().setHorizontalScrollBarPolicy( GafferUI.ScrollMode._toQt( horizontalScrollMode ) )
if Qt.__binding__ in ( "PySide2", "PyQt5" ) :
self._qtWidget().header().setSectionsMovable( False )
else :
self._qtWidget().header().setMovable( False )
self._qtWidget().header().setSortIndicator( 0, QtCore.Qt.AscendingOrder )
self._qtWidget().setSortingEnabled( sortable )
self._qtWidget().expansionChanged.connect( Gaffer.WeakMethod( self.__expansionChanged ) )
# install an empty model, so we can construct our selection model
# around it. we'll update the model contents shortly in setPath().
_GafferUI._pathListingWidgetUpdateModel( GafferUI._qtAddress( self._qtWidget() ), None )
_GafferUI._pathListingWidgetSetColumns( GafferUI._qtAddress( self._qtWidget() ), columns )
self.__selectionModel = QtCore.QItemSelectionModel( self._qtWidget().model() )
self._qtWidget().setSelectionModel( self.__selectionModel )
self.__selectionChangedSlot = Gaffer.WeakMethod( self.__selectionChanged )
self._qtWidget().selectionModel().selectionChanged.connect( self.__selectionChangedSlot )
if allowMultipleSelection :
self._qtWidget().setSelectionMode( QtWidgets.QAbstractItemView.ExtendedSelection )
self.__pathSelectedSignal = GafferUI.WidgetSignal()
self.__selectionChangedSignal = GafferUI.WidgetSignal()
self.__displayModeChangedSignal = GafferUI.WidgetSignal()
self.__expansionChangedSignal = GafferUI.WidgetSignal()
# members for implementing drag and drop
self.__emittingButtonPress = False
self.__borrowedButtonPress = None
self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False )
self.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
self.mouseMoveSignal().connect( Gaffer.WeakMethod( self.__mouseMove ), scoped = False )
self.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ), scoped = False )
self.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ), scoped = False )
self.__dragPointer = "paths"
self.__path = None
self.setDisplayMode( displayMode )
self.setPath( path )
def setPath( self, path ) :
if path.isSame( self.__path ) :
return
self.__path = path
self.__pathChangedConnection = self.__path.pathChangedSignal().connect( Gaffer.WeakMethod( self.__pathChanged ) )
self.__currentDir = None
self.__currentPath = ""
self.__update()
def getPath( self ) :
return self.__path
def scrollToPath( self, path ) :
index = self.__indexForPath( path )
if index.isValid() :
self._qtWidget().scrollTo( index, self._qtWidget().EnsureVisible )
## Returns the path being displayed at the specified
# position within the widget. May return None if no path
# exists at that position.
def pathAt( self, position ) :
position = self._qtWidget().viewport().mapFrom(
self._qtWidget(),
QtCore.QPoint( position.x, position.y )
)
index = self._qtWidget().indexAt( position )
if not index.isValid() :
return None
return self.__pathForIndex( index )
## Sets which paths are currently expanded
# using an `IECore.PathMatcher` object.
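# For example, `widget.setExpansion( IECore.PathMatcher( [ "/a", "/a/b" ] ) )`
# (illustrative paths only).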
def setExpansion( self, paths ) :
assert( isinstance( paths, IECore.PathMatcher ) )
self._qtWidget().setExpansion( paths )
## Returns an `IECore.PathMatcher` object containing
# the currently expanded paths.
def getExpansion( self ) :
return _GafferUI._pathListingWidgetGetExpansion( GafferUI._qtAddress( self._qtWidget() ) )
def setPathExpanded( self, path, expanded ) :
index = self.__indexForPath( path )
if index.isValid() :
self._qtWidget().setExpanded( index, expanded )
def getPathExpanded( self, path ) :
index = self.__indexForPath( path )
if index.isValid() :
return self._qtWidget().isExpanded( index )
return False
## \deprecated Use `setExpansion()` instead
def setExpandedPaths( self, paths ) :
self.setExpansion(
IECore.PathMatcher(
[ str( x ) for x in paths ]
)
)
## \deprecated Use `getExpansion()` instead
def getExpandedPaths( self ) :
return _GafferUI._pathListingWidgetPathsForPathMatcher(
GafferUI._qtAddress( self._qtWidget() ),
self.getExpansion()
)
def expansionChangedSignal( self ) :
return self.__expansionChangedSignal
def getDisplayMode( self ) :
if _GafferUI._pathListingWidgetGetFlat( GafferUI._qtAddress( self._qtWidget() ) ) :
return self.DisplayMode.List
else :
return self.DisplayMode.Tree
def setDisplayMode( self, displayMode ) :
if displayMode == self.getDisplayMode() :
return
# It is possible to implement list mode as follows :
#
# ```
# self._qtWidget().setItemsExpandable( False )
# self._qtWidget().setRootIsDecorated( False )
# ```
#
# However, even when doing this QTreeView will call
# QModel::hasChildren() anyway, causing our model to
# recurse one level deeper than strictly necessary.
# This can be costly, so instead we implement list
# view by making the model flat.
_GafferUI._pathListingWidgetSetFlat( GafferUI._qtAddress( self._qtWidget() ), displayMode == self.DisplayMode.List )
self.__displayModeChangedSignal( self )
def displayModeChangedSignal( self ) :
return self.__displayModeChangedSignal
def setColumns( self, columns ) :
if columns == self.getColumns() :
return
_GafferUI._pathListingWidgetSetColumns( GafferUI._qtAddress( self._qtWidget() ), columns )
def getColumns( self ) :
return _GafferUI._pathListingWidgetGetColumns( GafferUI._qtAddress( self._qtWidget() ) )
def setHeaderVisible( self, visible ) :
self._qtWidget().header().setVisible( visible )
def getHeaderVisible( self ) :
return not self._qtWidget().header().isHidden()
## \deprecated Use constructor argument instead.
def setSortable( self, sortable ) :
if sortable == self.getSortable() :
return
self._qtWidget().setSortingEnabled( sortable )
if not sortable :
self._qtWidget().model().sort( -1 )
## \deprecated
def getSortable( self ) :
return self._qtWidget().isSortingEnabled()
## Sets the currently selected paths using an
# `IECore.PathMatcher` object.
def setSelection( self, paths, scrollToFirst=True, expandNonLeaf=True ) :
assert( isinstance( paths, IECore.PathMatcher ) )
# If there are pending changes to our path model, we must perform
# them now, so that the model is valid with respect to the paths
# we're trying to select.
self.__updateLazily.flush( self )
selectionModel = self._qtWidget().selectionModel()
selectionModel.selectionChanged.disconnect( self.__selectionChangedSlot )
selectionModel.clear()
_GafferUI._pathListingWidgetSetSelection(
GafferUI._qtAddress( self._qtWidget() ),
paths, scrollToFirst, expandNonLeaf
)
selectionModel.selectionChanged.connect( self.__selectionChangedSlot )
self.selectionChangedSignal()( self )
## Returns an `IECore.PathMatcher` object containing
# the currently selected paths.
def getSelection( self ) :
return _GafferUI._pathListingWidgetGetSelection( GafferUI._qtAddress( self._qtWidget() ) )
## \deprecated
def getSelectedPaths( self ) :
return _GafferUI._pathListingWidgetPathsForPathMatcher(
GafferUI._qtAddress( self._qtWidget() ),
self.getSelection()
)
## \deprecated
def setSelectedPaths( self, pathOrPaths, scrollToFirst=True, expandNonLeaf=True ) :
paths = pathOrPaths
if isinstance( pathOrPaths, Gaffer.Path ) :
paths = [ pathOrPaths ]
if self._qtWidget().selectionMode() != QtWidgets.QAbstractItemView.ExtendedSelection :
assert( len( paths ) <= 1 )
self.setSelection(
IECore.PathMatcher( [ str( path ) for path in paths ] ),
scrollToFirst, expandNonLeaf
)
## \deprecated Use getSelectedPaths() instead.
# \todo Remove me
def selectedPaths( self ) :
warnings.warn( "PathListingWidget.selectedPaths() is deprecated, use PathListingWidget.getSelectedPaths() instead.", DeprecationWarning, 2 )
return self.getSelectedPaths()
## This signal is emitted when the selected items change. Use getSelectedPaths()
# to get a list of those items.
def selectionChangedSignal( self ) :
return self.__selectionChangedSignal
## This signal is emitted when the user double clicks on a leaf path.
def pathSelectedSignal( self ) :
return self.__pathSelectedSignal
def setDragPointer( self, dragPointer ) :
self.__dragPointer = dragPointer
def getDragPointer( self ) :
return self.__dragPointer
def __update( self ) :
# update the listing if necessary. when the path itself changes, we only
# want to update if the directory being viewed has changed. if the path
# hasn't changed at all then we assume that the filter has changed and
# we therefore have to update the listing anyway.
# \todo Add an argument to Path.pathChangedSignal() to specify whether it
# is the path or the filtering that has changed, and remove self.__currentPath.
# Also consider whether it might be easier for the C++ PathModel to be
# doing the signal handling at that point.
dirPath = self.__dirPath()
if self.__currentDir!=dirPath or str( self.__path )==self.__currentPath :
selectedPaths = self.getSelectedPaths()
expandedPaths = None
if str( self.__path ) == self.__currentPath :
# the path location itself hasn't changed so we are assuming that just the filter has.
# if we're in the tree view mode, the user would probably be very happy
# if we didn't forget what was expanded.
if self.getDisplayMode() == self.DisplayMode.Tree :
expandedPaths = self.getExpandedPaths()
_GafferUI._pathListingWidgetUpdateModel( GafferUI._qtAddress( self._qtWidget() ), dirPath.copy() )
if expandedPaths is not None :
self.setExpandedPaths( expandedPaths )
self.setSelectedPaths( selectedPaths, scrollToFirst = False, expandNonLeaf = False )
self.__currentDir = dirPath
self.__currentPath = str( self.__path )
@GafferUI.LazyMethod()
def __updateLazily( self ) :
self.__update()
def __dirPath( self ) :
p = self.__path.copy()
if p.isLeaf() :
# if it's a leaf then take the parent
del p[-1]
else :
# it's not a leaf.
if not p.isValid() :
# it's not valid. if we can make it
# valid by trimming the last element
# then do that
if len( p ) :
pp = p.copy()
del pp[-1]
if pp.isValid() :
p = pp
else :
# it's valid and not a leaf, and
# that's what we want.
pass
return p
def __activated( self, modelIndex ) :
activatedPath = self.__pathForIndex( modelIndex )
if self.getDisplayMode() == self.DisplayMode.List :
self.__path[:] = activatedPath[:]
if activatedPath.isLeaf() :
self.pathSelectedSignal()( self )
return True
return False
def __selectionChanged( self, selected, deselected ) :
self.selectionChangedSignal()( self )
return True
def __pathChanged( self, path ) :
# Updates can be expensive, so we coalesce and
# defer them until the last minute.
self.__updateLazily()
def __indexForPath( self, path ) :
result = QtCore.QModelIndex()
_GafferUI._pathListingWidgetIndexForPath(
GafferUI._qtAddress( self._qtWidget() ),
path,
GafferUI._qtAddress( result ),
)
return result
def __pathForIndex( self, modelIndex ) :
return _GafferUI._pathListingWidgetPathForIndex(
GafferUI._qtAddress( self._qtWidget() ),
GafferUI._qtAddress( modelIndex ),
)
def __expansionChanged( self ) :
self.__expansionChangedSignal( self )
def __buttonPress( self, widget, event ) :
if self.__emittingButtonPress :
return False
self.__borrowedButtonPress = None
if event.buttons == event.Buttons.Left and event.modifiers == event.Modifiers.None_ :
# We want to implement drag and drop of the selected items, which means borrowing
# mouse press events that the QTreeView needs to perform selection and expansion.
# This makes things a little tricky. There are two cases :
#
# 1) There is an existing selection, and it's been clicked on. We borrow the event
# so we can get a dragBeginSignal(), and to prevent the QTreeView reducing a current
# multi-selection down to the single clicked item. If a drag doesn't materialise we'll
# re-emit the event straight to the QTreeView in __buttonRelease so the QTreeView can
# do its thing.
#
# 2) There is no existing selection. We pass the event to the QTreeView
# to see if it will select something which we can subsequently drag.
#
# This is further complicated by the fact that the button presses we simulate for Qt
# will end up back in this function, so we have to be careful to ignore those.
index = self._qtWidget().indexAt( QtCore.QPoint( event.line.p0.x, event.line.p0.y ) )
if self._qtWidget().selectionModel().isSelected( index ) :
# case 1 : existing selection.
self.__borrowedButtonPress = event
return True
else :
# case 2 : no existing selection.
# allow qt to update the selection first.
self.__emitButtonPress( event )
# we must always return True to prevent the event getting passed
# to the QTreeView again, and so we get a chance to start a drag.
return True
return False
def __buttonRelease( self, widget, event ) :
if self.__borrowedButtonPress is not None :
self.__emitButtonPress( self.__borrowedButtonPress )
self.__borrowedButtonPress = None
return False
def __mouseMove( self, widget, event ) :
if event.buttons :
# take the event so that the underlying QTreeView doesn't
# try to do drag-selection, which would ruin our own upcoming drag.
return True
return False
def __dragBegin( self, widget, event ) :
self.__borrowedButtonPress = None
# nothing to drag if there's no valid list entry under the pointer
index = self._qtWidget().indexAt( QtCore.QPoint( event.line.p0.x, event.line.p0.y ) )
if not index.isValid() :
return None
selection = self.getSelection()
if not( selection.isEmpty() ) :
GafferUI.Pointer.setCurrent( self.__dragPointer )
return IECore.StringVectorData( selection.paths() )
return None
def __dragEnd( self, widget, event ) :
GafferUI.Pointer.setCurrent( None )
def __emitButtonPress( self, event ) :
qEvent = QtGui.QMouseEvent(
QtCore.QEvent.MouseButtonPress,
QtCore.QPoint( event.line.p0.x, event.line.p0.y ),
QtCore.Qt.LeftButton,
QtCore.Qt.LeftButton,
QtCore.Qt.NoModifier
)
try :
self.__emittingButtonPress = True
# really i think we should be using QApplication::sendEvent()
# here, but it doesn't seem to be working. it works with the qObject
# in the Widget event filter, but for some reason that differs from
# Widget._owner( qObject )._qtWidget() which is what we have here.
self._qtWidget().mousePressEvent( qEvent )
finally :
self.__emittingButtonPress = False
# Private implementation - a QTreeView with some specific size behaviour, and shift
# clicking for recursive expand/collapse.
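# Shift+click expands/collapses the clicked item and all of its descendants,
# while Ctrl+click propagates the change one level deep (see __propagateExpanded).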
class _TreeView( QtWidgets.QTreeView ) :
# This signal is called when some items are either collapsed or
# expanded. It can be preferable to use this over the expanded or
# collapsed signals as it is emitted only once when making several
# changes.
expansionChanged = QtCore.Signal()
def __init__( self ) :
QtWidgets.QTreeView.__init__( self )
self.header().geometriesChanged.connect( self.updateGeometry )
self.header().sectionResized.connect( self.__sectionResized )
self.collapsed.connect( self.__collapsed )
self.expanded.connect( self.__expanded )
self.__recalculatingColumnWidths = False
# the ideal size for each column. we cache these because they're slow to compute
self.__idealColumnWidths = []
# offsets to the ideal sizes made by the user
self.__columnWidthAdjustments = []
self.__currentEventModifiers = QtCore.Qt.NoModifier
def setModel( self, model ) :
QtWidgets.QTreeView.setModel( self, model )
model.modelReset.connect( self.__recalculateColumnSizes )
self.__recalculateColumnSizes()
def setExpansion( self, paths ) :
self.collapsed.disconnect( self.__collapsed )
self.expanded.disconnect( self.__expanded )
self.collapseAll()
# This call is critical to performance - without
# it an update is triggered for every call to
# setExpanded().
self.scheduleDelayedItemsLayout()
_GafferUI._pathListingWidgetSetExpansion( GafferUI._qtAddress( self ), paths )
self.collapsed.connect( self.__collapsed )
self.expanded.connect( self.__expanded )
self.__recalculateColumnSizes()
self.expansionChanged.emit()
def sizeHint( self ) :
result = QtWidgets.QTreeView.sizeHint( self )
margins = self.contentsMargins()
result.setWidth( self.header().length() + margins.left() + margins.right() )
result.setHeight( max( result.width() * .5, result.height() ) )
return result
def event( self, event ) :
if event.type() == event.ShortcutOverride :
if event.key() in ( QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_Left, QtCore.Qt.Key_Right ) :
event.accept()
return True
return QtWidgets.QTreeView.event( self, event )
def mousePressEvent( self, event ) :
# we store the modifiers so that we can turn single
# expands/collapses into recursive ones in __propagateExpanded.
self.__currentEventModifiers = event.modifiers()
QtWidgets.QTreeView.mousePressEvent( self, event )
self.__currentEventModifiers = QtCore.Qt.NoModifier
def mouseReleaseEvent( self, event ) :
# we store the modifiers so that we can turn single
# expands/collapses into recursive ones in __propagateExpanded.
self.__currentEventModifiers = event.modifiers()
QtWidgets.QTreeView.mouseReleaseEvent( self, event )
self.__currentEventModifiers = QtCore.Qt.NoModifier
def mouseDoubleClickEvent( self, event ) :
self.__currentEventModifiers = event.modifiers()
QtWidgets.QTreeView.mouseDoubleClickEvent( self, event )
self.__currentEventModifiers = QtCore.Qt.NoModifier
def __recalculateColumnSizes( self ) :
self.__recalculatingColumnWidths = True
header = self.header()
numColumnsToResize = header.count() - 1 # leave the last section alone, as it's expandable
if numColumnsToResize != len( self.__columnWidthAdjustments ) :
# either the first time in here, or the number of columns has
# changed and we want to start again with the offsets.
self.__columnWidthAdjustments = [ 0 ] * numColumnsToResize
del self.__idealColumnWidths[:]
for i in range( 0, numColumnsToResize ) :
idealWidth = max( header.sectionSizeHint( i ), self.sizeHintForColumn( i ) )
self.__idealColumnWidths.append( idealWidth )
header.resizeSection( i, idealWidth + self.__columnWidthAdjustments[i] )
self.__recalculatingColumnWidths = False
def __sectionResized( self, index, oldWidth, newWidth ) :
if self.__recalculatingColumnWidths :
# we're only interested in resizing being done by the user
return
# store the difference between the ideal size and what the user would prefer, so
# we can apply it again in __recalculateColumnSizes
if len( self.__idealColumnWidths ) > index :
self.__columnWidthAdjustments[index] = newWidth - self.__idealColumnWidths[index]
def __collapsed( self, index ) :
self.__propagateExpanded( index, False )
self.__recalculateColumnSizes()
self.expansionChanged.emit()
def __expanded( self, index ) :
self.__propagateExpanded( index, True )
self.__recalculateColumnSizes()
self.expansionChanged.emit()
def __propagateExpanded( self, index, expanded ) :
numLevels = 0
if self.__currentEventModifiers & QtCore.Qt.ShiftModifier :
numLevels = 10000
elif self.__currentEventModifiers & QtCore.Qt.ControlModifier :
numLevels = 1
if numLevels :
self.collapsed.disconnect( self.__collapsed )
self.expanded.disconnect( self.__expanded )
# This call is critical for performance. Without it,
# QTreeView will start doing relayout for every single
# call to setExpanded() that we make inside
# _pathListingWidgetPropagateExpanded(). With it, it
# waits nicely till the end and does it all at once.
self.scheduleDelayedItemsLayout()
# Defer to C++ to do the heavy lifting.
_GafferUI._pathListingWidgetPropagateExpanded(
GafferUI._qtAddress( self ),
GafferUI._qtAddress( index ),
expanded,
numLevels
)
self.collapsed.connect( self.__collapsed )
self.expanded.connect( self.__expanded )
|
|
from insights.core.spec_factory import SpecSet, RegistryPoint
class Specs(SpecSet):
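"""Registry of every datasource insights-core knows how to collect. Each
attribute is an abstract RegistryPoint; concrete implementations (files,
commands, archive entries) are provided by subclasses that override these names."""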
abrt_ccpp_conf = RegistryPoint(filterable=True)
abrt_status_bare = RegistryPoint()
alternatives_display_python = RegistryPoint()
amq_broker = RegistryPoint(multi_output=True)
ansible_host = RegistryPoint()
auditctl_status = RegistryPoint()
auditd_conf = RegistryPoint()
audit_log = RegistryPoint(filterable=True)
autofs_conf = RegistryPoint()
avc_hash_stats = RegistryPoint()
avc_cache_threshold = RegistryPoint()
aws_instance_id_doc = RegistryPoint()
aws_instance_id_pkcs7 = RegistryPoint()
aws_instance_type = RegistryPoint()
awx_manage_check_license = RegistryPoint()
awx_manage_check_license_data = RegistryPoint(filterable=True)
awx_manage_print_settings = RegistryPoint()
azure_instance_type = RegistryPoint()
azure_instance_plan = RegistryPoint()
bios_uuid = RegistryPoint()
blkid = RegistryPoint()
bond = RegistryPoint(multi_output=True)
bond_dynamic_lb = RegistryPoint(multi_output=True)
boot_loader_entries = RegistryPoint(multi_output=True)
branch_info = RegistryPoint(raw=True)
brctl_show = RegistryPoint()
candlepin_broker = RegistryPoint()
candlepin_error_log = RegistryPoint(filterable=True)
candlepin_log = RegistryPoint(filterable=True)
cdc_wdm = RegistryPoint()
checkin_conf = RegistryPoint()
catalina_out = RegistryPoint(multi_output=True, filterable=True)
catalina_server_log = RegistryPoint(multi_output=True, filterable=True)
cciss = RegistryPoint(multi_output=True)
ceilometer_central_log = RegistryPoint(filterable=True)
ceilometer_collector_log = RegistryPoint(filterable=True)
ceilometer_compute_log = RegistryPoint(filterable=True)
ceilometer_conf = RegistryPoint()
ceph_conf = RegistryPoint(filterable=True)
ceph_config_show = RegistryPoint(multi_output=True)
ceph_df_detail = RegistryPoint()
ceph_health_detail = RegistryPoint()
ceph_insights = RegistryPoint()
ceph_log = RegistryPoint(multi_output=True, filterable=True)
ceph_osd_df = RegistryPoint()
ceph_osd_dump = RegistryPoint()
ceph_osd_ec_profile_get = RegistryPoint(multi_output=True)
ceph_osd_ec_profile_ls = RegistryPoint()
ceph_osd_log = RegistryPoint(multi_output=True, filterable=True)
ceph_osd_tree = RegistryPoint()
ceph_osd_tree_text = RegistryPoint()
ceph_report = RegistryPoint()
ceph_s = RegistryPoint()
ceph_v = RegistryPoint()
certificates_enddate = RegistryPoint()
cgroups = RegistryPoint()
chkconfig = RegistryPoint()
chrony_conf = RegistryPoint()
chronyc_sources = RegistryPoint()
cib_xml = RegistryPoint()
cinder_api_log = RegistryPoint(filterable=True)
cinder_conf = RegistryPoint()
cinder_volume_log = RegistryPoint(filterable=True)
cloud_init_custom_network = RegistryPoint()
cloud_cfg = RegistryPoint(filterable=True)
cloud_init_log = RegistryPoint(filterable=True)
cluster_conf = RegistryPoint(filterable=True)
cmdline = RegistryPoint()
cni_podman_bridge_conf = RegistryPoint()
cobbler_modules_conf = RegistryPoint()
cobbler_settings = RegistryPoint()
corosync = RegistryPoint()
corosync_cmapctl = RegistryPoint(multi_output=True)
corosync_conf = RegistryPoint()
cpe = RegistryPoint()
cpu_cores = RegistryPoint(multi_output=True)
cpu_siblings = RegistryPoint(multi_output=True)
cpu_smt_active = RegistryPoint()
cpu_smt_control = RegistryPoint()
cpu_vulns = RegistryPoint(multi_output=True)
cpu_vulns_meltdown = RegistryPoint()
cpu_vulns_spectre_v1 = RegistryPoint()
cpu_vulns_spectre_v2 = RegistryPoint()
cpu_vulns_spec_store_bypass = RegistryPoint()
cpuinfo_max_freq = RegistryPoint()
cpuinfo = RegistryPoint()
cpupower_frequency_info = RegistryPoint()
cpuset_cpus = RegistryPoint()
crictl_logs = RegistryPoint(multi_output=True, filterable=True)
crio_conf = RegistryPoint(multi_output=True)
cron_daily_rhsmd = RegistryPoint(filterable=True)
crypto_policies_config = RegistryPoint()
crypto_policies_state_current = RegistryPoint()
crypto_policies_opensshserver = RegistryPoint()
crypto_policies_bind = RegistryPoint()
crt = RegistryPoint()
cups_ppd = RegistryPoint(multi_output=True)
current_clocksource = RegistryPoint()
date_iso = RegistryPoint()
date = RegistryPoint()
date_utc = RegistryPoint()
db2ls_a_c = RegistryPoint()
designate_conf = RegistryPoint(filterable=True)
dcbtool_gc_dcb = RegistryPoint(multi_output=True)
df__alP = RegistryPoint()
df__al = RegistryPoint()
df__li = RegistryPoint()
dig_dnssec = RegistryPoint()
dig_edns = RegistryPoint()
dig_noedns = RegistryPoint()
dig = RegistryPoint()
dirsrv = RegistryPoint()
dirsrv_access = RegistryPoint(multi_output=True, filterable=True)
dirsrv_errors = RegistryPoint(multi_output=True, filterable=True)
display_java = RegistryPoint()
display_name = RegistryPoint()
dm_mod_use_blk_mq = RegistryPoint()
dmesg = RegistryPoint(filterable=True)
dmesg_log = RegistryPoint(filterable=True)
dmidecode = RegistryPoint()
dmsetup_info = RegistryPoint()
dmsetup_status = RegistryPoint()
dnf_conf = RegistryPoint(filterable=True)
dnf_modules = RegistryPoint()
dnf_module_list = RegistryPoint()
dnf_module_info = RegistryPoint()
dnsmasq_config = RegistryPoint(multi_output=True)
docker_container_inspect = RegistryPoint(multi_output=True)
docker_host_machine_id = RegistryPoint()
docker_image_inspect = RegistryPoint(multi_output=True)
docker_info = RegistryPoint()
docker_list_containers = RegistryPoint()
docker_list_images = RegistryPoint()
docker_network = RegistryPoint()
docker_storage = RegistryPoint()
docker_storage_setup = RegistryPoint()
docker_sysconfig = RegistryPoint()
dotnet_version = RegistryPoint()
doveconf = RegistryPoint(filterable=True)
dracut_kdump_capture_service = RegistryPoint()
dse_ldif = RegistryPoint(multi_output=True, filterable=True)
du_dirs = RegistryPoint(multi_output=True)
dumpe2fs_h = RegistryPoint(multi_output=True)
engine_config_all = RegistryPoint()
engine_db_query_vdsm_version = RegistryPoint()
engine_log = RegistryPoint(filterable=True)
etc_journald_conf_d = RegistryPoint(multi_output=True)
etc_journald_conf = RegistryPoint()
etc_machine_id = RegistryPoint()
etc_udev_40_redhat_rules = RegistryPoint(filterable=True)
etc_udev_oracle_asm_rules = RegistryPoint(multi_output=True, filterable=True)
etcd_conf = RegistryPoint(filterable=True)
ethernet_interfaces = RegistryPoint()
ethtool_a = RegistryPoint(multi_output=True)
ethtool_c = RegistryPoint(multi_output=True)
ethtool_g = RegistryPoint(multi_output=True)
ethtool_i = RegistryPoint(multi_output=True)
ethtool_k = RegistryPoint(multi_output=True)
ethtool = RegistryPoint(multi_output=True)
ethtool_S = RegistryPoint(multi_output=True)
ethtool_T = RegistryPoint(multi_output=True)
exim_conf = RegistryPoint()
facter = RegistryPoint()
fc_match = RegistryPoint()
fcoeadm_i = RegistryPoint()
fdisk_l = RegistryPoint()
fdisk_l_sos = RegistryPoint(multi_output=True)
findmnt_lo_propagation = RegistryPoint()
firewall_cmd_list_all_zones = RegistryPoint()
firewalld_conf = RegistryPoint(filterable=True)
foreman_production_log = RegistryPoint(filterable=True)
foreman_proxy_conf = RegistryPoint()
foreman_proxy_log = RegistryPoint(filterable=True)
foreman_satellite_log = RegistryPoint(filterable=True)
foreman_ssl_access_ssl_log = RegistryPoint(filterable=True)
foreman_ssl_error_ssl_log = RegistryPoint(filterable=True)
foreman_rake_db_migrate_status = RegistryPoint()
foreman_tasks_config = RegistryPoint(filterable=True)
freeipa_healthcheck_log = RegistryPoint()
fstab = RegistryPoint()
fw_devices = RegistryPoint()
fw_security = RegistryPoint()
galera_cnf = RegistryPoint()
gcp_instance_type = RegistryPoint()
gcp_license_codes = RegistryPoint()
getcert_list = RegistryPoint()
getconf_page_size = RegistryPoint()
getenforce = RegistryPoint()
getsebool = RegistryPoint()
gfs2_file_system_block_size = RegistryPoint(multi_output=True)
glance_api_conf = RegistryPoint()
glance_api_log = RegistryPoint(filterable=True)
glance_cache_conf = RegistryPoint()
glance_registry_conf = RegistryPoint()
gluster_v_info = RegistryPoint()
gluster_v_status = RegistryPoint()
gluster_peer_status = RegistryPoint()
gnocchi_conf = RegistryPoint(filterable=True)
gnocchi_metricd_log = RegistryPoint(filterable=True)
greenboot_status = RegistryPoint(filterable=True)
grubenv = RegistryPoint()
grub_conf = RegistryPoint()
grub_config_perms = RegistryPoint()
grub_efi_conf = RegistryPoint()
grub1_config_perms = RegistryPoint()
grub2_cfg = RegistryPoint()
grub2_efi_cfg = RegistryPoint()
grubby_default_index = RegistryPoint()
grubby_default_kernel = RegistryPoint()
hammer_ping = RegistryPoint()
hammer_task_list = RegistryPoint()
satellite_enabled_features = RegistryPoint()
haproxy_cfg = RegistryPoint()
haproxy_cfg_scl = RegistryPoint()
heat_api_log = RegistryPoint(filterable=True)
heat_conf = RegistryPoint()
heat_crontab = RegistryPoint()
heat_crontab_container = RegistryPoint()
heat_engine_log = RegistryPoint(filterable=True)
hostname = RegistryPoint()
hostname_default = RegistryPoint()
hostname_short = RegistryPoint()
hosts = RegistryPoint()
hponcfg_g = RegistryPoint()
httpd_access_log = RegistryPoint(filterable=True)
httpd_cert_info_in_nss = RegistryPoint(multi_output=True, filterable=True)
httpd_conf = RegistryPoint(multi_output=True)
httpd_conf_scl_httpd24 = RegistryPoint(multi_output=True)
httpd_conf_scl_jbcs_httpd24 = RegistryPoint(multi_output=True)
httpd_error_log = RegistryPoint(filterable=True)
httpd24_httpd_error_log = RegistryPoint(filterable=True)
jbcs_httpd24_httpd_error_log = RegistryPoint(filterable=True)
httpd_limits = RegistryPoint(multi_output=True)
httpd_M = RegistryPoint(multi_output=True)
httpd_on_nfs = RegistryPoint()
httpd_ssl_access_log = RegistryPoint(filterable=True)
httpd_ssl_cert_enddate = RegistryPoint(multi_output=True)
httpd_ssl_error_log = RegistryPoint(filterable=True)
httpd_V = RegistryPoint(multi_output=True)
virt_uuid_facts = RegistryPoint()
ifcfg = RegistryPoint(multi_output=True)
ifcfg_static_route = RegistryPoint(multi_output=True)
ifconfig = RegistryPoint()
imagemagick_policy = RegistryPoint(multi_output=True, filterable=True)
initctl_lst = RegistryPoint()
init_ora = RegistryPoint()
initscript = RegistryPoint(multi_output=True)
init_process_cgroup = RegistryPoint()
insights_client_conf = RegistryPoint(filterable=True)
installed_rpms = RegistryPoint()
interrupts = RegistryPoint()
ip6tables_permanent = RegistryPoint()
ip6tables = RegistryPoint()
ip_addr = RegistryPoint()
ip_addresses = RegistryPoint()
ipaupgrade_log = RegistryPoint(filterable=True)
ipcs_m = RegistryPoint()
ipcs_m_p = RegistryPoint()
ipcs_s = RegistryPoint()
ipcs_s_i = RegistryPoint(multi_output=True)
ip_netns_exec_namespace_lsof = RegistryPoint(multi_output=True, filterable=True)
ip_route_show_table_all = RegistryPoint()
ip_s_link = RegistryPoint()
ipsec_conf = RegistryPoint(filterable=True)
iptables_permanent = RegistryPoint()
iptables = RegistryPoint()
ipv4_neigh = RegistryPoint()
ipv6_neigh = RegistryPoint()
ip_neigh_show = RegistryPoint()
ironic_conf = RegistryPoint(filterable=True)
ironic_inspector_log = RegistryPoint(filterable=True)
iscsiadm_m_session = RegistryPoint()
jboss_domain_server_log = RegistryPoint(multi_output=True, filterable=True)
jboss_standalone_server_log = RegistryPoint(multi_output=True, filterable=True)
jboss_standalone_main_config = RegistryPoint(multi_output=True)
jboss_version = RegistryPoint(multi_output=True)
journal_all = RegistryPoint(filterable=True)
journal_since_boot = RegistryPoint(filterable=True)
katello_service_status = RegistryPoint(filterable=True)
kdump_conf = RegistryPoint()
kerberos_kdc_log = RegistryPoint(filterable=True)
kernel_config = RegistryPoint(multi_output=True, filterable=True)
kexec_crash_loaded = RegistryPoint()
kexec_crash_size = RegistryPoint()
keystone_conf = RegistryPoint()
keystone_crontab = RegistryPoint()
keystone_crontab_container = RegistryPoint()
keystone_log = RegistryPoint(filterable=True)
kpatch_list = RegistryPoint()
krb5 = RegistryPoint(multi_output=True)
ksmstate = RegistryPoint()
kubepods_cpu_quota = RegistryPoint(multi_output=True)
lastupload = RegistryPoint(multi_output=True)
ld_library_path_of_user = RegistryPoint()
ldif_config = RegistryPoint(multi_output=True)
libssh_client_config = RegistryPoint(filterable=True)
libssh_server_config = RegistryPoint(filterable=True)
libvirtd_log = RegistryPoint(filterable=True)
libvirtd_qemu_log = RegistryPoint(multi_output=True, filterable=True)
limits_conf = RegistryPoint(multi_output=True)
locale = RegistryPoint()
localtime = RegistryPoint()
logrotate_conf = RegistryPoint(multi_output=True)
losetup = RegistryPoint()
lpfc_max_luns = RegistryPoint()
lpstat_p = RegistryPoint()
lpstat_protocol_printers = RegistryPoint()
ls_boot = RegistryPoint()
ls_dev = RegistryPoint()
ls_disk = RegistryPoint()
ls_docker_volumes = RegistryPoint()
ls_edac_mc = RegistryPoint()
ls_etc = RegistryPoint()
ls_etc_systemd = RegistryPoint()
ls_ipa_idoverride_memberof = RegistryPoint()
ls_lib_firmware = RegistryPoint()
ls_ocp_cni_openshift_sdn = RegistryPoint()
ls_origin_local_volumes_pods = RegistryPoint()
ls_osroot = RegistryPoint()
ls_run_systemd_generator = RegistryPoint()
ls_R_var_lib_nova_instances = RegistryPoint()
ls_sys_firmware = RegistryPoint()
ls_usr_bin = RegistryPoint(filterable=True)
ls_usr_lib64 = RegistryPoint(filterable=True)
ls_usr_lib_systemd = RegistryPoint()
ls_usr_sbin = RegistryPoint(filterable=True)
ls_var_cache_pulp = RegistryPoint()
ls_var_lib_mongodb = RegistryPoint()
ls_var_lib_nova_instances = RegistryPoint()
ls_var_log = RegistryPoint()
ls_var_opt_mssql = RegistryPoint()
ls_var_opt_mssql_log = RegistryPoint()
ls_var_run = RegistryPoint()
ls_var_spool_clientmq = RegistryPoint()
ls_var_spool_postfix_maildrop = RegistryPoint()
ls_var_tmp = RegistryPoint(filterable=True)
ls_var_www = RegistryPoint()
ls_tmp = RegistryPoint(filterable=True)
lsblk = RegistryPoint()
lsblk_pairs = RegistryPoint()
lscpu = RegistryPoint()
lsinitrd = RegistryPoint(filterable=True)
lsinitrd_lvm_conf = RegistryPoint()
lsmod = RegistryPoint()
lsof = RegistryPoint(filterable=True)
lspci = RegistryPoint()
lspci_vmmkn = RegistryPoint()
lssap = RegistryPoint()
lsscsi = RegistryPoint()
lsvmbus = RegistryPoint()
lvdisplay = RegistryPoint()
lvm_conf = RegistryPoint(filterable=True)
lvmconfig = RegistryPoint()
lvs_noheadings = RegistryPoint()
lvs_noheadings_all = RegistryPoint()
lvs = RegistryPoint()
mac_addresses = RegistryPoint(multi_output=True)
machine_id = RegistryPoint()
manila_conf = RegistryPoint()
mariadb_log = RegistryPoint(filterable=True)
max_uid = RegistryPoint()
mdadm_E = RegistryPoint(multi_output=True)
md5chk_files = RegistryPoint(multi_output=True)
mdstat = RegistryPoint()
meminfo = RegistryPoint()
messages = RegistryPoint(filterable=True)
metadata_json = RegistryPoint(raw=True)
satellite_missed_pulp_agent_queues = RegistryPoint()
mistral_executor_log = RegistryPoint(filterable=True)
mlx4_port = RegistryPoint(multi_output=True)
modinfo_i40e = RegistryPoint()
modinfo_igb = RegistryPoint()
modinfo_ixgbe = RegistryPoint()
modinfo_veth = RegistryPoint()
modinfo_vmxnet3 = RegistryPoint()
modinfo = RegistryPoint(multi_output=True)
modinfo_all = RegistryPoint()
modprobe = RegistryPoint(multi_output=True)
mokutil_sbstate = RegistryPoint()
mongod_conf = RegistryPoint(multi_output=True, filterable=True)
mount = RegistryPoint()
mounts = RegistryPoint()
mssql_conf = RegistryPoint()
mssql_api_assessment = RegistryPoint()
mssql_tls_cert_enddate = RegistryPoint()
multicast_querier = RegistryPoint()
multipath_conf = RegistryPoint()
multipath_conf_initramfs = RegistryPoint()
multipath__v4__ll = RegistryPoint()
mysqladmin_status = RegistryPoint()
mysqladmin_vars = RegistryPoint()
mysql_log = RegistryPoint(multi_output=True, filterable=True)
mysqld_limits = RegistryPoint()
named_checkconf_p = RegistryPoint(filterable=True)
named_conf = RegistryPoint(filterable=True)
namespace = RegistryPoint()
ndctl_list_Ni = RegistryPoint()
netconsole = RegistryPoint()
netstat_agn = RegistryPoint()
netstat_i = RegistryPoint()
netstat = RegistryPoint()
netstat_s = RegistryPoint()
networkmanager_conf = RegistryPoint()
networkmanager_dispatcher_d = RegistryPoint(multi_output=True)
neutron_conf = RegistryPoint(filterable=True)
neutron_sriov_agent = RegistryPoint(filterable=True)
neutron_dhcp_agent_ini = RegistryPoint(filterable=True)
neutron_l3_agent_ini = RegistryPoint(filterable=True)
neutron_l3_agent_log = RegistryPoint(filterable=True)
neutron_metadata_agent_ini = RegistryPoint(filterable=True)
neutron_metadata_agent_log = RegistryPoint(filterable=True)
neutron_ml2_conf = RegistryPoint(filterable=True)
neutron_ovs_agent_log = RegistryPoint(filterable=True)
neutron_plugin_ini = RegistryPoint()
neutron_server_log = RegistryPoint(filterable=True)
nfnetlink_queue = RegistryPoint()
nfs_exports_d = RegistryPoint(multi_output=True)
nfs_exports = RegistryPoint()
nginx_conf = RegistryPoint(multi_output=True)
nginx_ssl_cert_enddate = RegistryPoint(multi_output=True)
nmcli_conn_show = RegistryPoint()
nmcli_dev_show = RegistryPoint()
nmcli_dev_show_sos = RegistryPoint(multi_output=True)
nova_api_log = RegistryPoint(filterable=True)
nova_compute_log = RegistryPoint(filterable=True)
nova_conf = RegistryPoint()
nova_crontab = RegistryPoint()
nova_crontab_container = RegistryPoint()
nova_uid = RegistryPoint()
nova_migration_uid = RegistryPoint()
nscd_conf = RegistryPoint(filterable=True)
nss_rhel7 = RegistryPoint()
nsswitch_conf = RegistryPoint(filterable=True)
ntp_conf = RegistryPoint()
ntpq_leap = RegistryPoint()
ntpq_pn = RegistryPoint()
ntptime = RegistryPoint()
numa_cpus = RegistryPoint(multi_output=True)
numeric_user_group_name = RegistryPoint()
nvme_core_io_timeout = RegistryPoint()
oc_get_bc = RegistryPoint()
oc_get_build = RegistryPoint()
oc_get_clusterrole_with_config = RegistryPoint()
oc_get_clusterrolebinding_with_config = RegistryPoint()
oc_get_dc = RegistryPoint()
oc_get_egressnetworkpolicy = RegistryPoint()
oc_get_endpoints = RegistryPoint()
oc_get_event = RegistryPoint()
oc_get_node = RegistryPoint()
oc_get_pod = RegistryPoint()
oc_get_project = RegistryPoint()
oc_get_pvc = RegistryPoint()
oc_get_pv = RegistryPoint()
oc_get_rc = RegistryPoint()
oc_get_rolebinding = RegistryPoint()
oc_get_role = RegistryPoint()
oc_get_route = RegistryPoint()
oc_get_service = RegistryPoint()
oc_get_configmap = RegistryPoint()
octavia_conf = RegistryPoint(filterable=True)
odbc_ini = RegistryPoint(filterable=True)
odbcinst_ini = RegistryPoint()
open_vm_tools_stat_raw_text_session = RegistryPoint()
openvswitch_other_config = RegistryPoint()
openvswitch_server_log = RegistryPoint(filterable=True)
openshift_certificates = RegistryPoint(multi_output=True)
openshift_fluentd_environ = RegistryPoint(multi_output=True)
openshift_hosts = RegistryPoint(filterable=True)
openshift_router_environ = RegistryPoint(multi_output=True)
openvswitch_daemon_log = RegistryPoint(filterable=True)
osa_dispatcher_log = RegistryPoint(filterable=True)
ose_master_config = RegistryPoint()
ose_node_config = RegistryPoint()
os_release = RegistryPoint()
ovirt_engine_boot_log = RegistryPoint(filterable=True)
ovirt_engine_confd = RegistryPoint(multi_output=True)
ovirt_engine_console_log = RegistryPoint(filterable=True)
ovirt_engine_server_log = RegistryPoint(filterable=True)
ovirt_engine_ui_log = RegistryPoint(filterable=True)
ovs_appctl_fdb_show_bridge = RegistryPoint(multi_output=True)
ovs_ofctl_dump_flows = RegistryPoint(multi_output=True)
ovs_vsctl_list_bridge = RegistryPoint()
ovs_vsctl_show = RegistryPoint()
ovs_vswitchd_limits = RegistryPoint()
pacemaker_log = RegistryPoint(filterable=True)
package_provides_command = RegistryPoint(filterable=True)
package_provides_java = RegistryPoint(multi_output=True)
package_provides_httpd = RegistryPoint(multi_output=True)
pam_conf = RegistryPoint()
parted__l = RegistryPoint()
partitions = RegistryPoint()
passenger_status = RegistryPoint()
password_auth = RegistryPoint()
pci_rport_target_disk_paths = RegistryPoint()
pcp_metrics = RegistryPoint()
pcs_config = RegistryPoint()
pcs_quorum_status = RegistryPoint()
pcs_status = RegistryPoint()
php_ini = RegistryPoint(filterable=True)
pluginconf_d = RegistryPoint(multi_output=True)
pmlog_summary = RegistryPoint()
pmrep_metrics = RegistryPoint()
podman_container_inspect = RegistryPoint(multi_output=True)
podman_image_inspect = RegistryPoint(multi_output=True)
podman_list_containers = RegistryPoint()
podman_list_images = RegistryPoint()
postconf_builtin = RegistryPoint(filterable=True)
postconf = RegistryPoint(filterable=True)
postgresql_conf = RegistryPoint()
postgresql_log = RegistryPoint(multi_output=True, filterable=True)
prev_uploader_log = RegistryPoint()
proc_netstat = RegistryPoint()
proc_slabinfo = RegistryPoint()
proc_snmp_ipv4 = RegistryPoint()
proc_snmp_ipv6 = RegistryPoint()
proc_stat = RegistryPoint()
ps_alxwww = RegistryPoint(filterable=True)
ps_aux = RegistryPoint(filterable=True)
ps_auxcww = RegistryPoint()
ps_auxww = RegistryPoint(filterable=True)
ps_ef = RegistryPoint(filterable=True)
ps_eo = RegistryPoint()
ps_eo_cmd = RegistryPoint()
pulp_worker_defaults = RegistryPoint()
puppet_ssl_cert_ca_pem = RegistryPoint()
puppetserver_config = RegistryPoint(filterable=True)
puppet_ca_cert_expire_date = RegistryPoint()
pvs_noheadings = RegistryPoint()
pvs_noheadings_all = RegistryPoint()
pvs = RegistryPoint()
qemu_conf = RegistryPoint()
qemu_xml = RegistryPoint(multi_output=True)
ql2xmaxlun = RegistryPoint()
qpid_stat_g = RegistryPoint()
qpid_stat_q = RegistryPoint()
qpid_stat_u = RegistryPoint()
qpidd_conf = RegistryPoint()
rabbitmq_env = RegistryPoint()
rabbitmq_logs = RegistryPoint(multi_output=True, filterable=True)
rabbitmq_policies = RegistryPoint()
rabbitmq_queues = RegistryPoint()
rabbitmq_report = RegistryPoint()
rabbitmq_report_of_containers = RegistryPoint(multi_output=True)
rabbitmq_startup_err = RegistryPoint(filterable=True)
rabbitmq_startup_log = RegistryPoint(filterable=True)
rabbitmq_users = RegistryPoint()
rc_local = RegistryPoint()
rdma_conf = RegistryPoint()
readlink_e_etc_mtab = RegistryPoint()
readlink_e_shift_cert_client = RegistryPoint()
readlink_e_shift_cert_server = RegistryPoint()
redhat_release = RegistryPoint()
resolv_conf = RegistryPoint()
rhev_data_center = RegistryPoint()
rhv_log_collector_analyzer = RegistryPoint()
rhn_charsets = RegistryPoint()
rhn_conf = RegistryPoint()
rhn_entitlement_cert_xml = RegistryPoint(multi_output=True)
rhn_hibernate_conf = RegistryPoint()
rhn_schema_stats = RegistryPoint()
rhn_schema_version = RegistryPoint()
rhn_search_daemon_log = RegistryPoint(filterable=True)
rhn_server_satellite_log = RegistryPoint(filterable=True)
rhn_server_xmlrpc_log = RegistryPoint(filterable=True)
rhn_taskomatic_daemon_log = RegistryPoint(filterable=False)
rhosp_release = RegistryPoint()
rhsm_conf = RegistryPoint()
rhsm_katello_default_ca_cert = RegistryPoint()
rhsm_log = RegistryPoint(filterable=True)
rhsm_releasever = RegistryPoint()
rndc_status = RegistryPoint()
root_crontab = RegistryPoint()
ros_config = RegistryPoint()
route = RegistryPoint()
rpm_ostree_status = RegistryPoint()
rpm_V_packages = RegistryPoint()
rsyslog_conf = RegistryPoint(filterable=True, multi_output=True)
samba = RegistryPoint(filterable=True)
sap_dev_disp = RegistryPoint(multi_output=True, filterable=True)
sap_dev_rd = RegistryPoint(multi_output=True, filterable=True)
sap_hana_landscape = RegistryPoint(multi_output=True)
sap_hdb_version = RegistryPoint(multi_output=True)
sap_host_profile = RegistryPoint(filterable=True)
sapcontrol_getsystemupdatelist = RegistryPoint()
saphostctl_getcimobject_sapinstance = RegistryPoint(filterable=True)
saphostexec_status = RegistryPoint()
saphostexec_version = RegistryPoint()
sat5_insights_properties = RegistryPoint()
satellite_compute_resources = RegistryPoint()
satellite_content_hosts_count = RegistryPoint()
satellite_core_taskreservedresource_count = RegistryPoint()
satellite_custom_ca_chain = RegistryPoint()
satellite_custom_hiera = RegistryPoint()
satellite_katello_empty_url_repositories = RegistryPoint()
satellite_mongodb_storage_engine = RegistryPoint()
satellite_non_yum_type_repos = RegistryPoint()
satellite_qualified_capsules = RegistryPoint()
satellite_qualified_katello_repos = RegistryPoint()
satellite_sca_status = RegistryPoint()
satellite_settings = RegistryPoint()
satellite_version_rb = RegistryPoint()
satellite_yaml = RegistryPoint()
scheduler = RegistryPoint(multi_output=True)
sched_rt_runtime_us = RegistryPoint()
scsi = RegistryPoint()
scsi_mod_max_report_luns = RegistryPoint()
scsi_mod_use_blk_mq = RegistryPoint()
sctp_asc = RegistryPoint()
sctp_eps = RegistryPoint()
sctp_snmp = RegistryPoint()
scsi_eh_deadline = RegistryPoint(multi_output=True)
scsi_fwver = RegistryPoint(multi_output=True)
sealert = RegistryPoint()
secure = RegistryPoint(filterable=True)
selinux_config = RegistryPoint()
sestatus = RegistryPoint()
setup_named_chroot = RegistryPoint(filterable=True)
smartctl = RegistryPoint(multi_output=True)
smartpdc_settings = RegistryPoint(filterable=True)
smbstatus_p = RegistryPoint()
smbstatus_S = RegistryPoint()
sockstat = RegistryPoint()
softnet_stat = RegistryPoint()
software_collections_list = RegistryPoint()
spamassassin_channels = RegistryPoint()
spfile_ora = RegistryPoint(multi_output=True)
ssh_config_d = RegistryPoint(multi_output=True, filterable=True)
ssh_config = RegistryPoint(filterable=True)
ssh_foreman_config = RegistryPoint(filterable=True)
ssh_foreman_proxy_config = RegistryPoint(filterable=True)
sshd_config_perms = RegistryPoint()
sshd_config = RegistryPoint(filterable=True)
ss = RegistryPoint()
sssd_config = RegistryPoint()
sssd_logs = RegistryPoint(multi_output=True, filterable=True)
samba_logs = RegistryPoint(multi_output=True, filterable=True)
subscription_manager_id = RegistryPoint()
subscription_manager_list_consumed = RegistryPoint()
subscription_manager_list_installed = RegistryPoint()
subscription_manager_installed_product_ids = RegistryPoint(filterable=True)
subscription_manager_release_show = RegistryPoint()
swift_conf = RegistryPoint()
swift_log = RegistryPoint(filterable=True)
swift_object_expirer_conf = RegistryPoint()
swift_proxy_server_conf = RegistryPoint()
sys_kernel_sched_features = RegistryPoint()
sysconfig_chronyd = RegistryPoint()
sysconfig_grub = RegistryPoint()
sysconfig_httpd = RegistryPoint()
sysconfig_irqbalance = RegistryPoint()
sysconfig_kdump = RegistryPoint()
sysconfig_libvirt_guests = RegistryPoint()
sysconfig_memcached = RegistryPoint()
sysconfig_mongod = RegistryPoint(multi_output=True)
sysconfig_network = RegistryPoint()
sysconfig_ntpd = RegistryPoint()
sysconfig_oracleasm = RegistryPoint()
sysconfig_prelink = RegistryPoint()
sysconfig_sshd = RegistryPoint()
sysconfig_virt_who = RegistryPoint()
sysctl_conf_initramfs = RegistryPoint(multi_output=True)
sysctl_conf = RegistryPoint()
sysctl = RegistryPoint()
systemctl_cat_dnsmasq_service = RegistryPoint()
systemctl_cat_rpcbind_socket = RegistryPoint()
systemctl_cinder_volume = RegistryPoint()
systemctl_httpd = RegistryPoint()
systemctl_nginx = RegistryPoint()
systemctl_list_unit_files = RegistryPoint()
systemctl_list_units = RegistryPoint()
systemctl_mariadb = RegistryPoint()
systemctl_pulp_workers = RegistryPoint()
systemctl_pulp_resmg = RegistryPoint()
systemctl_pulp_celerybeat = RegistryPoint()
systemctl_qpidd = RegistryPoint()
systemctl_qdrouterd = RegistryPoint()
systemctl_show_all_services = RegistryPoint()
systemctl_show_all_services_with_limited_properties = RegistryPoint()
systemctl_show_target = RegistryPoint()
systemctl_smartpdc = RegistryPoint()
systemctl_status_all = RegistryPoint(filterable=True)
systemd_analyze_blame = RegistryPoint()
systemd_docker = RegistryPoint()
systemd_logind_conf = RegistryPoint()
systemd_openshift_node = RegistryPoint()
systemd_system_conf = RegistryPoint()
systemd_system_origin_accounting = RegistryPoint()
systemid = RegistryPoint()
systool_b_scsi_v = RegistryPoint()
sys_vmbus_device_id = RegistryPoint(multi_output=True)
sys_vmbus_class_id = RegistryPoint(multi_output=True)
testparm_s = RegistryPoint(filterable=True)
testparm_v_s = RegistryPoint(filterable=True)
tags = RegistryPoint()
teamdctl_config_dump = RegistryPoint(multi_output=True)
teamdctl_state_dump = RegistryPoint(multi_output=True)
thp_enabled = RegistryPoint()
thp_use_zero_page = RegistryPoint()
tmpfilesd = RegistryPoint(multi_output=True)
tomcat_server_xml = RegistryPoint(multi_output=True)
tomcat_vdc_fallback = RegistryPoint()
tomcat_vdc_targeted = RegistryPoint(multi_output=True)
tomcat_web_xml = RegistryPoint(multi_output=True)
tuned_adm = RegistryPoint()
tuned_conf = RegistryPoint()
udev_persistent_net_rules = RegistryPoint()
udev_fc_wwpn_id_rules = RegistryPoint(filterable=True)
uname = RegistryPoint()
up2date = RegistryPoint()
up2date_log = RegistryPoint(filterable=True)
uploader_log = RegistryPoint()
uptime = RegistryPoint()
usr_journald_conf_d = RegistryPoint(multi_output=True)
var_qemu_xml = RegistryPoint(multi_output=True)
vdsm_conf = RegistryPoint()
vdsm_id = RegistryPoint()
vdsm_import_log = RegistryPoint(multi_output=True, filterable=True)
vdsm_log = RegistryPoint(filterable=True)
vdsm_logger_conf = RegistryPoint()
version_info = RegistryPoint()
vdo_status = RegistryPoint()
vgdisplay = RegistryPoint()
vgs_noheadings = RegistryPoint()
vgs_noheadings_all = RegistryPoint()
vgs = RegistryPoint()
vhost_net_zero_copy_tx = RegistryPoint()
virsh_list_all = RegistryPoint()
virt_what = RegistryPoint()
virt_who_conf = RegistryPoint(multi_output=True, filterable=True)
virtlogd_conf = RegistryPoint(filterable=True)
vma_ra_enabled = RegistryPoint()
vmcore_dmesg = RegistryPoint(multi_output=True, filterable=True)
vmware_tools_conf = RegistryPoint()
vsftpd_conf = RegistryPoint(filterable=True)
vsftpd = RegistryPoint()
x86_pti_enabled = RegistryPoint()
x86_ibpb_enabled = RegistryPoint()
x86_ibrs_enabled = RegistryPoint()
x86_retp_enabled = RegistryPoint()
xfs_info = RegistryPoint(multi_output=True)
xinetd_conf = RegistryPoint(multi_output=True)
yum_conf = RegistryPoint()
yum_list_available = RegistryPoint()
yum_list_installed = RegistryPoint()
yum_log = RegistryPoint()
yum_repolist = RegistryPoint()
yum_repos_d = RegistryPoint(multi_output=True)
yum_updateinfo = RegistryPoint()
yum_updates = RegistryPoint()
zdump_v = RegistryPoint()
zipl_conf = RegistryPoint()
sendq_socket_buffer = RegistryPoint()
recvq_socket_buffer = RegistryPoint()
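# A minimal sketch of how these registry points acquire concrete implementations
# (illustrative only - the command and path below are assumptions, not the real
# default specs):
#
# from insights.core.spec_factory import simple_command, simple_file
#
# class ExampleSpecs(Specs):
#     hostname = simple_command("/bin/hostname -f")
#     fstab = simple_file("/etc/fstab")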
|
|
"""
General Utilities
(part of web.py)
"""
__all__ = [
"Storage", "storage", "storify",
"iters",
"rstrips", "lstrips", "strips",
"TimeoutError", "timelimit",
"Memoize", "memoize",
"re_compile", "re_subm",
"group",
"IterBetter", "iterbetter",
"dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
"listget", "intget", "datestr",
"numify", "denumify", "dateify",
"CaptureStdout", "capturestdout", "Profile", "profile",
"tryall",
"ThreadedDict",
"autoassign",
"to36",
"safemarkdown"
]
import re, sys, time, threading
try: import datetime
except ImportError: pass
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
`mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
"""
def getvalue(x):
if hasattr(x, 'value'):
return x.value
else:
return x
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
iters.append(set)
try:
from sets import Set
iters.append(Set)
except ImportError:
pass
class _hack(tuple): pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
"""
removes the string `remove` from the right of `text`
>>> rstrips("foobar", "bar")
'foo'
"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""
removes the string `remove` from the left of `text`
>>> lstrips("foobar", "foo")
'bar'
"""
return _strips('l', text, remove)
def strips(text, remove):
"""removes the string `remove` from the both sides of `text`
>>> strips("foobarfoo", "foo")
'bar'
"""
return rstrips(lstrips(text, remove), remove)
class TimeoutError(Exception): pass
def timelimit(timeout):
"""
A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
if it takes longer.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>>
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
TimeoutError: took too long
>>> timelimit(1)(meaningoflife)()
42
_Caveat:_ The function isn't stopped after `timeout` seconds but continues
executing in a separate thread. (There seems to be no way to kill a thread.)
inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
self.setDaemon(True)
self.start()
def run(self):
try:
self.result = function(*args, **kw)
except:
self.error = sys.exc_info()
c = Dispatch()
c.join(timeout)
if c.isAlive():
raise TimeoutError, 'took too long'
if c.error:
raise c.error[0], c.error[1]
return c.result
return _2
return _1
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>> fastlife = memoize(meaningoflife)
>>> meaningoflife()
42
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
TimeoutError: took too long
>>> fastlife()
42
>>> timelimit(.1)(fastlife)()
42
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args, **keywords):
key = (args, tuple(keywords.items()))
if key not in self.cache:
self.cache[key] = self.func(*args, **keywords)
return self.cache[key]
memoize = Memoize
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
"""
    Returns an iterator over a series of lists of length `size` from `seq`.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
"""
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
yield [seq.next() for i in xrange(size)]
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
>>> import itertools
>>> c = iterbetter(itertools.count())
>>> c[1]
1
>>> c[5]
5
>>> c[3]
Traceback (most recent call last):
...
IndexError: already passed 3
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError, "already passed "+str(i)
try:
while i > self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
iterbetter = IterBetter
def dictreverse(mapping):
"""
>>> dictreverse({1: 2, 3: 4})
{2: 1, 4: 3}
"""
return dict([(value, key) for (key, value) in mapping.iteritems()])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
>>> d = {1:2, 3:4}
>>> dictfind(d, 4)
3
>>> dictfind(d, 5)
"""
for (key, value) in dictionary.iteritems():
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
>>> d = {1:4, 3:4}
>>> dictfindall(d, 4)
[1, 3]
>>> dictfindall(d, 5)
[]
"""
res = []
for (key, value) in dictionary.iteritems():
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
>>> d = {1:2, 3:4}
>>> dictincr(d, 1)
3
>>> d[1]
3
>>> dictincr(d, 5)
1
>>> d[5]
1
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(dict_a, dict_b):
"""
    Returns a dictionary consisting of the keys in `dict_a` and `dict_b`.
    If they share a key, the value from `dict_b` is used.
>>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
{1: 0, 2: 1, 3: 1}
"""
result = {}
result.update(dict_a)
result.update(dict_b)
return result
def listget(lst, ind, default=None):
"""
Returns `lst[ind]` if it exists, `default` otherwise.
>>> listget(['a'], 0)
'a'
>>> listget(['a'], 1)
>>> listget(['a'], 1, 'b')
'b'
"""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""
Returns `integer` as an int or `default` if it can't.
>>> intget('3')
3
>>> intget('3a')
>>> intget('3a', 0)
0
"""
try:
return int(integer)
except (TypeError, ValueError):
return default
def datestr(then, now=None):
"""
Converts a (UTC) datetime object to a nice string representation.
>>> from datetime import datetime, timedelta
>>> d = datetime(1970, 5, 1)
>>> datestr(d, now=d)
'0 microseconds ago'
>>> for t, v in {
... timedelta(microseconds=1): '1 microsecond ago',
... timedelta(microseconds=2): '2 microseconds ago',
... -timedelta(microseconds=1): '1 microsecond from now',
... -timedelta(microseconds=2): '2 microseconds from now',
... timedelta(microseconds=2000): '2 milliseconds ago',
... timedelta(seconds=2): '2 seconds ago',
... timedelta(seconds=2*60): '2 minutes ago',
... timedelta(seconds=2*60*60): '2 hours ago',
... timedelta(days=2): '2 days ago',
... }.iteritems():
... assert datestr(d, now=d+t) == v
>>> datestr(datetime(1970, 1, 1), now=d)
'January 1'
>>> datestr(datetime(1969, 1, 1), now=d)
'January 1, 1969'
>>> datestr(datetime(1970, 6, 1), now=d)
'June 1, 1970'
"""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not now: now = datetime.datetime.utcnow()
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
out = then.strftime('%B %e') # e.g. 'June 13'
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
def numify(string):
"""
    Removes all non-digit characters from `string` (anything after a '.' is dropped).
>>> numify('800-555-1212')
'8005551212'
"""
return ''.join(c for c in str(string).split('.')[0] if c.isdigit())
def denumify(string, pattern):
"""
Formats `string` according to `pattern`, where the letter X gets replaced
by characters from `string`.
>>> denumify("8005551212", "(XXX) XXX-XXXX")
'(800) 555-1212'
"""
out = []
for c in pattern:
if c == "X":
out.append(string[0])
string = string[1:]
else:
out.append(c)
return ''.join(out)
def dateify(datestring):
"""
    Formats a numified `datestring` as a date/time string ('YYYY-MM-DD HH:MM:SS').
"""
return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
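# Illustrative usage sketch (not part of the original module): `dateify`
# feeds an already-numified 14-digit timestamp through `denumify` with the
# fixed pattern above, e.g.:
#
#   >>> dateify('20100105123000')
#   '2010-01-05 12:30:00'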
class CaptureStdout:
"""
Captures everything `func` prints to stdout and returns it instead.
>>> def idiot():
... print "foo"
>>> capturestdout(idiot)()
'foo\\n'
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
from cStringIO import StringIO
# Not threadsafe!
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
>>> import time
>>> out, inf = profile(time.sleep)(.001)
>>> out
>>> inf[:10].strip()
'took 0.0'
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import hotshot, hotshot.stats, tempfile ##, time already imported
temp = tempfile.NamedTemporaryFile()
prof = hotshot.Profile(temp.name)
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
prof.close()
stats = hotshot.stats.load(temp.name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += capturestdout(stats.print_stats)(40)
x += capturestdout(stats.print_callers)()
return result, x
profile = Profile
import traceback
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
>>> tryall(dict(j=lambda: True))
j: True
----------------------------------------
results:
True: 1
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in context.iteritems():
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print key + ':',
try:
r = value()
dictincr(results, r)
print r
except:
print 'ERROR'
dictincr(results, 'ERROR')
print ' ' + '\n '.join(traceback.format_exc().split('\n'))
print '-'*40
print 'results:'
for (key, value) in results.iteritems():
print ' '*2, str(key)+':', value
class ThreadedDict:
"""
Takes a dictionary that maps threads to objects.
When a thread tries to get or set an attribute or item
of the threadeddict, it passes it on to the object
    for that thread in the dictionary.
"""
def __init__(self, dictionary):
self.__dict__['_ThreadedDict__d'] = dictionary
def __getattr__(self, attr):
return getattr(self.__d[threading.currentThread()], attr)
def __getitem__(self, item):
return self.__d[threading.currentThread()][item]
def __setattr__(self, attr, value):
if attr == '__doc__':
self.__dict__[attr] = value
else:
return setattr(self.__d[threading.currentThread()], attr, value)
def __setitem__(self, item, value):
self.__d[threading.currentThread()][item] = value
def __hash__(self):
return hash(self.__d[threading.currentThread()])
threadeddict = ThreadedDict
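# Illustrative usage sketch (not part of the original module): a ThreadedDict
# proxies attribute and item access to whatever object the backing dictionary
# maps the *current* thread to, so each thread sees only its own data.
#
#   d = {}
#   td = threadeddict(d)
#   d[threading.currentThread()] = storage()
#   td.user = 'bob'              # stored on this thread's storage object
#   assert td['user'] == 'bob'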
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
>>> self = storage()
>>> autoassign(self, dict(a=1, b=2))
>>> self
<Storage {'a': 1, 'b': 2}>
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
def to36(q):
"""
Converts an integer to base 36 (a useful scheme for human-sayable IDs).
>>> to36(35)
'z'
>>> to36(119292)
'2k1o'
>>> int(to36(939387374), 36)
939387374
>>> to36(0)
'0'
>>> to36(-393)
Traceback (most recent call last):
...
ValueError: must supply a positive integer
"""
if q < 0: raise ValueError, "must supply a positive integer"
letters = "0123456789abcdefghijklmnopqrstuvwxyz"
converted = []
while q != 0:
q, r = divmod(q, 36)
converted.insert(0, letters[r])
return "".join(converted) or '0'
r_url = re_compile(r'(?<!\()(http://(\S+))')
def safemarkdown(text):
"""
Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things supported by Markdown
can be used. Also converts raw URLs to links.
(requires [markdown.py](http://webpy.org/markdown.py))
"""
from markdown import markdown
if text:
text = text.replace('<', '<')
# TODO: automatically get page title?
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
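# Illustrative sketch (not part of the original module; assumes the external
# markdown.py module is importable): safemarkdown escapes '<', wraps raw URLs
# as <http://...> so Markdown autolinks them, and then renders the text.
#
#   html = safemarkdown('see http://webpy.org')
#   # html contains a paragraph with a link to http://webpy.org; the exact
#   # markup depends on the markdown version in use.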
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
from copy import deepcopy
from functools import wraps
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.fields.files import FileDescriptor
from django.db.models.query_utils import DeferredAttribute
class DescriptorMixin:
tracker_instance = None
def __get__(self, instance, owner):
if instance is None:
return self
was_deferred = False
field_name = self._get_field_name()
if field_name in instance._deferred_fields:
instance._deferred_fields.remove(field_name)
was_deferred = True
value = super().__get__(instance, owner)
if was_deferred:
self.tracker_instance.saved_data[field_name] = deepcopy(value)
return value
def _get_field_name(self):
return self.field_name
class DescriptorWrapper:
def __init__(self, field_name, descriptor, tracker_attname):
self.field_name = field_name
self.descriptor = descriptor
self.tracker_attname = tracker_attname
def __get__(self, instance, owner):
if instance is None:
return self
was_deferred = self.field_name in instance.get_deferred_fields()
try:
value = self.descriptor.__get__(instance, owner)
except AttributeError:
value = self.descriptor
if was_deferred:
tracker_instance = getattr(instance, self.tracker_attname)
tracker_instance.saved_data[self.field_name] = deepcopy(value)
return value
def __set__(self, instance, value):
initialized = hasattr(instance, '_instance_initialized')
was_deferred = self.field_name in instance.get_deferred_fields()
# Sentinel attribute to detect whether we are already trying to
# set the attribute higher up the stack. This prevents infinite
# recursion when retrieving deferred values from the database.
recursion_sentinel_attname = '_setting_' + self.field_name
already_setting = hasattr(instance, recursion_sentinel_attname)
if initialized and was_deferred and not already_setting:
setattr(instance, recursion_sentinel_attname, True)
try:
# Retrieve the value to set the saved_data value.
# This will undefer the field
getattr(instance, self.field_name)
finally:
instance.__dict__.pop(recursion_sentinel_attname, None)
if hasattr(self.descriptor, '__set__'):
self.descriptor.__set__(instance, value)
else:
instance.__dict__[self.field_name] = value
@staticmethod
def cls_for_descriptor(descriptor):
if hasattr(descriptor, '__delete__'):
return FullDescriptorWrapper
else:
return DescriptorWrapper
class FullDescriptorWrapper(DescriptorWrapper):
"""
Wrapper for descriptors with all three descriptor methods.
"""
def __delete__(self, obj):
self.descriptor.__delete__(obj)
class FieldInstanceTracker:
def __init__(self, instance, fields, field_map):
self.instance = instance
self.fields = fields
self.field_map = field_map
@property
def deferred_fields(self):
return self.instance.get_deferred_fields()
def get_field_value(self, field):
return getattr(self.instance, self.field_map[field])
def set_saved_fields(self, fields=None):
if not self.instance.pk:
self.saved_data = {}
elif fields is None:
self.saved_data = self.current()
else:
self.saved_data.update(**self.current(fields=fields))
        # prevent side effects from mutable field values
for field, field_value in self.saved_data.items():
self.saved_data[field] = deepcopy(field_value)
def current(self, fields=None):
"""Returns dict of current values for all tracked fields"""
if fields is None:
deferred_fields = self.deferred_fields
if deferred_fields:
fields = [
field for field in self.fields
if field not in deferred_fields
]
else:
fields = self.fields
return {f: self.get_field_value(f) for f in fields}
def has_changed(self, field):
"""Returns ``True`` if field has changed from currently saved value"""
if field in self.fields:
# deferred fields haven't changed
if field in self.deferred_fields and field not in self.instance.__dict__:
return False
return self.previous(field) != self.get_field_value(field)
else:
raise FieldError('field "%s" not tracked' % field)
def previous(self, field):
"""Returns currently saved value of given field"""
# handle deferred fields that have not yet been loaded from the database
if self.instance.pk and field in self.deferred_fields and field not in self.saved_data:
# if the field has not been assigned locally, simply fetch and un-defer the value
if field not in self.instance.__dict__:
self.get_field_value(field)
# if the field has been assigned locally, store the local value, fetch the database value,
# store database value to saved_data, and restore the local value
else:
current_value = self.get_field_value(field)
self.instance.refresh_from_db(fields=[field])
self.saved_data[field] = deepcopy(self.get_field_value(field))
setattr(self.instance, self.field_map[field], current_value)
return self.saved_data.get(field)
def changed(self):
"""Returns dict of fields that changed since save (with old values)"""
return {
field: self.previous(field)
for field in self.fields
if self.has_changed(field)
}
def init_deferred_fields(self):
self.instance._deferred_fields = set()
if hasattr(self.instance, '_deferred') and not self.instance._deferred:
return
class DeferredAttributeTracker(DescriptorMixin, DeferredAttribute):
tracker_instance = self
class FileDescriptorTracker(DescriptorMixin, FileDescriptor):
tracker_instance = self
def _get_field_name(self):
return self.field.name
self.instance._deferred_fields = self.instance.get_deferred_fields()
for field in self.instance._deferred_fields:
field_obj = self.instance.__class__.__dict__.get(field)
if isinstance(field_obj, FileDescriptor):
field_tracker = FileDescriptorTracker(field_obj.field)
setattr(self.instance.__class__, field, field_tracker)
else:
field_tracker = DeferredAttributeTracker(field)
setattr(self.instance.__class__, field, field_tracker)
class FieldTracker:
tracker_class = FieldInstanceTracker
def __init__(self, fields=None):
self.fields = fields
def get_field_map(self, cls):
"""Returns dict mapping fields names to model attribute names"""
field_map = {field: field for field in self.fields}
all_fields = {f.name: f.attname for f in cls._meta.fields}
field_map.update(**{k: v for (k, v) in all_fields.items()
if k in field_map})
return field_map
def contribute_to_class(self, cls, name):
self.name = name
self.attname = '_%s' % name
models.signals.class_prepared.connect(self.finalize_class, sender=cls)
def finalize_class(self, sender, **kwargs):
if self.fields is None:
self.fields = (field.attname for field in sender._meta.fields)
self.fields = set(self.fields)
for field_name in self.fields:
descriptor = getattr(sender, field_name)
wrapper_cls = DescriptorWrapper.cls_for_descriptor(descriptor)
wrapped_descriptor = wrapper_cls(field_name, descriptor, self.attname)
setattr(sender, field_name, wrapped_descriptor)
self.field_map = self.get_field_map(sender)
models.signals.post_init.connect(self.initialize_tracker)
self.model_class = sender
setattr(sender, self.name, self)
self.patch_save(sender)
def initialize_tracker(self, sender, instance, **kwargs):
if not isinstance(instance, self.model_class):
return # Only init instances of given model (including children)
tracker = self.tracker_class(instance, self.fields, self.field_map)
setattr(instance, self.attname, tracker)
tracker.set_saved_fields()
instance._instance_initialized = True
def patch_save(self, model):
self._patch(model, 'save_base', 'update_fields')
self._patch(model, 'refresh_from_db', 'fields')
def _patch(self, model, method, fields_kwarg):
original = getattr(model, method)
@wraps(original)
def inner(instance, *args, **kwargs):
ret = original(instance, *args, **kwargs)
update_fields = kwargs.get(fields_kwarg)
if not update_fields and update_fields is not None: # () or []
fields = update_fields
elif update_fields is None:
fields = None
else:
fields = (
field for field in update_fields if
field in self.fields
)
getattr(instance, self.attname).set_saved_fields(
fields=fields
)
return ret
setattr(model, method, inner)
def __get__(self, instance, owner):
if instance is None:
return self
else:
return getattr(instance, self.attname)
class ModelInstanceTracker(FieldInstanceTracker):
def has_changed(self, field):
"""Returns ``True`` if field has changed from currently saved value"""
if not self.instance.pk:
return True
elif field in self.saved_data:
return self.previous(field) != self.get_field_value(field)
else:
raise FieldError('field "%s" not tracked' % field)
def changed(self):
"""Returns dict of fields that changed since save (with old values)"""
if not self.instance.pk:
return {}
saved = self.saved_data.items()
current = self.current()
return {k: v for k, v in saved if v != current[k]}
class ModelTracker(FieldTracker):
tracker_class = ModelInstanceTracker
def get_field_map(self, cls):
return {field: field for field in self.fields}
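# Illustrative usage sketch (not part of the original module): FieldTracker is
# declared on a Django model and exposes the per-instance tracker through its
# descriptor protocol. The model and field names below are hypothetical.
#
#   class Post(models.Model):
#       title = models.CharField(max_length=100)
#       tracker = FieldTracker()
#
#   post = Post.objects.create(title='draft')
#   post.title = 'final'
#   post.tracker.has_changed('title')   # True
#   post.tracker.previous('title')      # 'draft'
#   post.save()
#   post.tracker.changed()              # {} -- saved values were refreshed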
|
|
# -*- coding: utf-8 -*-
"""
kombu.async.timer
=================
Timer scheduling Python callbacks.
"""
from __future__ import absolute_import
import heapq
import sys
from collections import namedtuple
from datetime import datetime
from functools import wraps
from time import time
from weakref import proxy as weakrefproxy
from kombu.five import monotonic
from kombu.log import get_logger
try:
from pytz import utc
except ImportError:
utc = None
DEFAULT_MAX_INTERVAL = 2
EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
IS_PYPY = hasattr(sys, 'pypy_version_info')
logger = get_logger(__name__)
__all__ = ['Entry', 'Timer', 'to_timestamp']
scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))
def to_timestamp(d, default_timezone=utc):
if isinstance(d, datetime):
if d.tzinfo is None:
d = d.replace(tzinfo=default_timezone)
return max((d - EPOCH).total_seconds(), 0)
return d
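# Illustrative usage sketch (not part of the original module): datetimes are
# converted to seconds since the UNIX epoch, while non-datetime values pass
# through unchanged.
#
#   >>> to_timestamp(datetime(1970, 1, 2, tzinfo=utc))
#   86400.0
#   >>> to_timestamp(42.5)
#   42.5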
class Entry(object):
if not IS_PYPY: # pragma: no cover
__slots__ = (
'fun', 'args', 'kwargs', 'tref', 'canceled',
'_last_run', '__weakref__',
)
def __init__(self, fun, args=None, kwargs=None):
self.fun = fun
self.args = args or []
self.kwargs = kwargs or {}
self.tref = weakrefproxy(self)
self._last_run = None
self.canceled = False
def __call__(self):
return self.fun(*self.args, **self.kwargs)
def cancel(self):
try:
self.tref.canceled = True
except ReferenceError: # pragma: no cover
pass
def __repr__(self):
        return '<TimerEntry: {0}(*{1!r}, **{2!r})>'.format(
self.fun.__name__, self.args, self.kwargs)
def __hash__(self):
return hash((self.fun, repr(self.args), repr(self.kwargs)))
# must not use hash() to order entries
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self.__eq__(other)
@property
def cancelled(self):
return self.canceled
@cancelled.setter
def cancelled(self, value):
self.canceled = value
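# Illustrative usage sketch (not part of the original module): an Entry simply
# wraps a callable with its arguments; calling it invokes the callable, and
# cancel() flips the `canceled` flag that the Timer checks before firing.
#
#   entry = Entry(lambda x: x * 2, args=(21,))
#   entry()            # -> 42
#   entry.cancel()
#   entry.cancelled    # -> True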
class Timer(object):
"""ETA scheduler."""
Entry = Entry
on_error = None
def __init__(self, max_interval=None, on_error=None, **kwargs):
self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)
self.on_error = on_error or self.on_error
self._queue = []
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stop()
def call_at(self, eta, fun, args=(), kwargs={}, priority=0):
return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)
def call_after(self, secs, fun, args=(), kwargs={}, priority=0):
return self.enter_after(secs, self.Entry(fun, args, kwargs), priority)
def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0):
tref = self.Entry(fun, args, kwargs)
@wraps(fun)
def _reschedules(*args, **kwargs):
last, now = tref._last_run, monotonic()
lsince = (now - tref._last_run) if last else secs
try:
if lsince and lsince >= secs:
tref._last_run = now
return fun(*args, **kwargs)
finally:
if not tref.canceled:
last = tref._last_run
next = secs - (now - last) if last else secs
self.enter_after(next, tref, priority)
tref.fun = _reschedules
tref._last_run = None
return self.enter_after(secs, tref, priority)
def enter_at(self, entry, eta=None, priority=0, time=time):
"""Enter function into the scheduler.
:param entry: Item to enter.
:keyword eta: Scheduled time as a :class:`datetime.datetime` object.
:keyword priority: Unused.
"""
if eta is None:
eta = time()
if isinstance(eta, datetime):
try:
eta = to_timestamp(eta)
except Exception as exc:
if not self.handle_error(exc):
raise
return
return self._enter(eta, priority, entry)
def enter_after(self, secs, entry, priority=0, time=time):
return self.enter_at(entry, time() + secs, priority)
def _enter(self, eta, priority, entry, push=heapq.heappush):
push(self._queue, scheduled(eta, priority, entry))
return entry
def apply_entry(self, entry):
try:
entry()
except Exception as exc:
if not self.handle_error(exc):
logger.error('Error in timer: %r', exc, exc_info=True)
def handle_error(self, exc_info):
if self.on_error:
self.on_error(exc_info)
return True
def stop(self):
pass
def __iter__(self, min=min, nowfun=time,
pop=heapq.heappop, push=heapq.heappush):
"""This iterator yields a tuple of ``(entry, wait_seconds)``,
where if entry is :const:`None` the caller should wait
for ``wait_seconds`` until it polls the schedule again."""
max_interval = self.max_interval
queue = self._queue
while 1:
if queue:
eventA = queue[0]
now, eta = nowfun(), eventA[0]
if now < eta:
yield min(eta - now, max_interval), None
else:
eventB = pop(queue)
if eventB is eventA:
entry = eventA[2]
if not entry.canceled:
yield None, entry
continue
else:
push(queue, eventB)
else:
yield None, None
def clear(self):
self._queue[:] = [] # atomic, without creating a new list.
def cancel(self, tref):
tref.cancel()
def __len__(self):
return len(self._queue)
def __nonzero__(self):
return True
@property
def queue(self, _pop=heapq.heappop):
"""Snapshot of underlying datastructure."""
events = list(self._queue)
return [_pop(v) for v in [events] * len(events)]
@property
def schedule(self):
return self
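# Illustrative usage sketch (not part of the original module): entries are
# pushed onto the heap with call_at()/call_after() and drained by iterating
# the timer, which yields (wait_seconds, entry) tuples.
#
#   timer = Timer()
#   timer.call_after(0, lambda: 'fired')
#   wait, entry = next(iter(timer))
#   if entry is not None:
#       timer.apply_entry(entry)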
|
|
# -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
from . import util
import re
def build_preprocessors(md, **kwargs):
""" Build the default set of preprocessors used by Markdown. """
preprocessors = util.Registry()
preprocessors.register(NormalizeWhitespace(md), 'normalize_whitespace', 30)
preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20)
preprocessors.register(ReferencePreprocessor(md), 'reference', 10)
return preprocessors
class Preprocessor(util.Processor):
"""
Preprocessors are run after the text is broken into lines.
Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.
Preprocessors must extend markdown.Preprocessor.
"""
def run(self, lines):
"""
Each subclass of Preprocessor should override the `run` method, which
takes the document as a list of strings split by newlines and returns
the (possibly modified) list of lines.
"""
pass # pragma: no cover
class NormalizeWhitespace(Preprocessor):
""" Normalize whitespace for consistent parsing. """
def run(self, lines):
source = '\n'.join(lines)
source = source.replace(util.STX, "").replace(util.ETX, "")
source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
source = source.expandtabs(self.md.tab_length)
source = re.sub(r'(?<=\n) +\n', '\n', source)
return source.split('\n')
class HtmlBlockPreprocessor(Preprocessor):
"""Remove html blocks from the text and store them for later retrieval."""
right_tag_patterns = ["</%s>", "%s>"]
attrs_pattern = r"""
\s+(?P<attr>[^>"'/= ]+)=(?P<q>['"])(?P<value>.*?)(?P=q) # attr="value"
| # OR
\s+(?P<attr1>[^>"'/= ]+)=(?P<value1>[^> ]+) # attr=value
| # OR
\s+(?P<attr2>[^>"'/= ]+) # attr
"""
left_tag_pattern = r'^\<(?P<tag>[^> ]+)(?P<attrs>(%s)*)\s*\/?\>?' % \
attrs_pattern
attrs_re = re.compile(attrs_pattern, re.VERBOSE)
left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
markdown_in_raw = False
def _get_left_tag(self, block):
m = self.left_tag_re.match(block)
if m:
tag = m.group('tag')
raw_attrs = m.group('attrs')
attrs = {}
if raw_attrs:
for ma in self.attrs_re.finditer(raw_attrs):
if ma.group('attr'):
if ma.group('value'):
attrs[ma.group('attr').strip()] = ma.group('value')
else:
attrs[ma.group('attr').strip()] = ""
elif ma.group('attr1'):
if ma.group('value1'):
attrs[ma.group('attr1').strip()] = ma.group(
'value1'
)
else:
attrs[ma.group('attr1').strip()] = ""
elif ma.group('attr2'):
attrs[ma.group('attr2').strip()] = ""
return tag, len(m.group(0)), attrs
else:
tag = block[1:].split(">", 1)[0].lower()
return tag, len(tag)+2, {}
def _recursive_tagfind(self, ltag, rtag, start_index, block):
while 1:
i = block.find(rtag, start_index)
if i == -1:
return -1
j = block.find(ltag, start_index)
# if no ltag, or rtag found before another ltag, return index
if (j > i or j == -1):
return i + len(rtag)
# another ltag found before rtag, use end of ltag as starting
# point and search again
j = block.find('>', j)
start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
if start_index == -1:
# HTML potentially malformed- ltag has no corresponding
# rtag
return -1
def _get_right_tag(self, left_tag, left_index, block):
for p in self.right_tag_patterns:
tag = p % left_tag
i = self._recursive_tagfind(
"<%s" % left_tag, tag, left_index, block
)
if i > 2:
return tag.lstrip("<").rstrip(">"), i
return block.rstrip()[-left_index:-1].lower(), len(block)
def _equal_tags(self, left_tag, right_tag):
if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
elif left_tag == right_tag[1:] and right_tag[0] == "/":
return True
else:
return False
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
def _stringindex_to_listindex(self, stringindex, items):
"""
Same effect as concatenating the strings in items,
finding the character to which stringindex refers in that string,
and returning the index of the item in which that character resides.
"""
items.append('dummy')
i, count = 0, 0
while count <= stringindex:
count += len(items[i])
i += 1
return i - 1
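    # Illustrative worked example (not part of the original module): with
    # items == ['ab', 'cd'] the concatenated string is 'abcd', so string
    # index 3 ('d') falls inside items[1] and the method returns 1.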
def _nested_markdown_in_html(self, items):
"""Find and process html child elements of the given element block."""
for i, item in enumerate(items):
if self.left_tag_re.match(item):
left_tag, left_index, attrs = \
self._get_left_tag(''.join(items[i:]))
right_tag, data_index = self._get_right_tag(
left_tag, left_index, ''.join(items[i:]))
right_listindex = \
self._stringindex_to_listindex(data_index, items[i:]) + i
if 'markdown' in attrs.keys():
items[i] = items[i][left_index:] # remove opening tag
placeholder = self.md.htmlStash.store_tag(
left_tag, attrs, i + 1, right_listindex + 1)
items.insert(i, placeholder)
if len(items) - right_listindex <= 1: # last nest, no tail
right_listindex -= 1
items[right_listindex] = items[right_listindex][
:-len(right_tag) - 2] # remove closing tag
else: # raw html
if len(items) - right_listindex <= 1: # last element
right_listindex -= 1
if right_listindex <= i:
right_listindex = i + 1
placeholder = self.md.htmlStash.store('\n\n'.join(
items[i:right_listindex]))
del items[i:right_listindex]
items.insert(i, placeholder)
return items
def run(self, lines):
text = "\n".join(lines)
new_blocks = []
text = text.rsplit("\n\n")
items = []
left_tag = ''
right_tag = ''
in_tag = False # flag
while text:
block = text[0]
if block.startswith("\n"):
block = block[1:]
text = text[1:]
if block.startswith("\n"):
block = block[1:]
if not in_tag:
if block.startswith("<") and len(block.strip()) > 1:
if block[1:4] == "!--":
# is a comment block
left_tag, left_index, attrs = "--", 2, {}
else:
left_tag, left_index, attrs = self._get_left_tag(block)
right_tag, data_index = self._get_right_tag(left_tag,
left_index,
block)
# keep checking conditions below and maybe just append
if data_index < len(block) and (self.md.is_block_level(left_tag) or left_tag == '--'):
text.insert(0, block[data_index:])
block = block[:data_index]
if not (self.md.is_block_level(left_tag) or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
if self._is_oneliner(left_tag):
new_blocks.append(block.strip())
continue
if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag):
if self.markdown_in_raw and 'markdown' in attrs.keys():
block = block[left_index:-len(right_tag) - 2]
new_blocks.append(self.md.htmlStash.
store_tag(left_tag, attrs, 0, 2))
new_blocks.extend([block])
else:
new_blocks.append(
self.md.htmlStash.store(block.strip()))
continue
else:
# if is block level tag and is not complete
if (not self._equal_tags(left_tag, right_tag)) and \
(self.md.is_block_level(left_tag) or left_tag == "--"):
items.append(block.strip())
in_tag = True
else:
new_blocks.append(
self.md.htmlStash.store(block.strip())
)
continue
else:
new_blocks.append(block)
else:
items.append(block)
# Need to evaluate all items so we can calculate relative to the left index.
right_tag, data_index = self._get_right_tag(left_tag, left_index, ''.join(items))
# Adjust data_index: relative to items -> relative to last block
prev_block_length = 0
for item in items[:-1]:
prev_block_length += len(item)
data_index -= prev_block_length
if self._equal_tags(left_tag, right_tag):
# if find closing tag
if data_index < len(block):
# we have more text after right_tag
items[-1] = block[:data_index]
text.insert(0, block[data_index:])
in_tag = False
if self.markdown_in_raw and 'markdown' in attrs.keys():
items[0] = items[0][left_index:]
items[-1] = items[-1][:-len(right_tag) - 2]
if items[len(items) - 1]: # not a newline/empty string
right_index = len(items) + 3
else:
right_index = len(items) + 2
new_blocks.append(self.md.htmlStash.store_tag(
left_tag, attrs, 0, right_index))
placeholderslen = len(self.md.htmlStash.tag_data)
new_blocks.extend(
self._nested_markdown_in_html(items))
nests = len(self.md.htmlStash.tag_data) - \
placeholderslen
self.md.htmlStash.tag_data[-1 - nests][
'right_index'] += nests - 2
else:
new_blocks.append(
self.md.htmlStash.store('\n\n'.join(items)))
items = []
if items:
if self.markdown_in_raw and 'markdown' in attrs.keys():
items[0] = items[0][left_index:]
items[-1] = items[-1][:-len(right_tag) - 2]
if items[len(items) - 1]: # not a newline/empty string
right_index = len(items) + 3
else:
right_index = len(items) + 2
new_blocks.append(
self.md.htmlStash.store_tag(
left_tag, attrs, 0, right_index))
placeholderslen = len(self.md.htmlStash.tag_data)
new_blocks.extend(self._nested_markdown_in_html(items))
nests = len(self.md.htmlStash.tag_data) - placeholderslen
self.md.htmlStash.tag_data[-1 - nests][
'right_index'] += nests - 2
else:
new_blocks.append(
self.md.htmlStash.store('\n\n'.join(items)))
new_blocks.append('\n')
new_text = "\n\n".join(new_blocks)
return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
""" Remove reference definitions from text and store for later use. """
TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
RE = re.compile(
r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL
)
TITLE_RE = re.compile(r'^%s$' % TITLE)
def run(self, lines):
new_text = []
while lines:
line = lines.pop(0)
m = self.RE.match(line)
if m:
id = m.group(1).strip().lower()
link = m.group(2).lstrip('<').rstrip('>')
t = m.group(5) or m.group(6) or m.group(7)
if not t:
# Check next line for title
tm = self.TITLE_RE.match(lines[0])
if tm:
lines.pop(0)
t = tm.group(2) or tm.group(3) or tm.group(4)
self.md.references[id] = (link, t)
# Preserve the line to prevent raw HTML indexing issue.
# https://github.com/Python-Markdown/markdown/issues/584
new_text.append('')
else:
new_text.append(line)
return new_text # + "\n"
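# Illustrative sketch (not part of the original module): ReferencePreprocessor
# pulls link definitions out of the source and stores them on the Markdown
# instance. A line such as
#
#   [web.py]: http://webpy.org "web.py homepage"
#
# is replaced with an empty line and recorded as
# md.references['web.py'] == ('http://webpy.org', 'web.py homepage').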
|
|
from common import * # NOQA
from cattle import ApiError
SERVICE_KIND = 'kubernetesService'
def from_context(context):
return context.client, context.agent_client, context.host
def test_bad_agent(super_client, new_context):
_, account, agent_client = register_simulated_host(new_context,
return_agent=True)
def post():
external_id = random_str()
agent_client.create_external_storage_pool_event(
externalId=external_id,
eventType="storagepool.create",
hostUuids=[],
storagePool={
'name': 'name-%s' % external_id,
'externalId': external_id,
})
# Test it works
post()
# Test it fails with two agents
super_client.wait_success(super_client.create_agent(
uri='test://' + random_str(),
accountId=account.id))
with pytest.raises(ApiError) as e:
post()
assert e.value.error.code == 'MissingRequired'
# Test it fails with no agents
for agent in super_client.list_agent(accountId=account.id):
super_client.wait_success(agent.deactivate())
with pytest.raises(ApiError) as e:
post()
assert e.value.error.code == 'CantVerifyAgent'
def test_external_host_event_miss(new_context):
new_context.create_container()
client = new_context.client
host = new_context.host
event = client.create_external_host_event(hostLabel='foo=bar',
eventType='host.evacuate',
deleteHost=True)
event = client.wait_success(event)
host = client.reload(host)
assert event.state == 'created'
assert host.state == 'active'
def test_external_host_event_wrong_event(new_context):
c = new_context.create_container()
client = new_context.client
host = client.update(new_context.host, labels={
'foo': 'bar'
})
host = client.wait_success(host)
assert host.labels == {'foo': 'bar'}
event = client.create_external_host_event(hostLabel='foo=bar',
eventType='host.notevacuate',
deleteHost=True)
assert event.state == 'creating'
event = client.wait_success(event)
host = client.reload(host)
c = client.wait_success(c)
assert event.state == 'created'
assert host.state == 'active'
assert c.state == 'running'
def test_external_host_event_hit(new_context):
c = new_context.create_container()
client = new_context.client
host = client.wait_success(new_context.host)
host = client.update(host, labels={
'foo': 'bar'
})
host = client.wait_success(host)
assert host.labels == {'foo': 'bar'}
event = client.create_external_host_event(hostLabel='foo=bar',
eventType='host.evacuate',
deleteHost=True)
assert event.state == 'creating'
event = client.wait_success(event)
host = client.reload(host)
c = client.wait_success(c)
assert event.state == 'created'
assert host.state == 'purged'
assert c.removed is not None
def test_external_host_event_no_delete(new_context):
c = new_context.create_container()
client = new_context.client
host = client.update(new_context.host, labels={
'foo': 'bar'
})
host = client.wait_success(host)
assert host.labels == {'foo': 'bar'}
event = client.create_external_host_event(hostLabel='foo=bar',
eventType='host.evacuate')
assert event.state == 'creating'
event = client.wait_success(event)
host = client.reload(host)
c = client.wait_success(c)
assert event.state == 'created'
assert host.state == 'inactive'
def test_external_host_event_by_id(new_context):
c = new_context.create_container()
new_host = register_simulated_host(new_context)
client = new_context.client
host = client.update(new_context.host, labels={
'foo': 'bar'
})
host = client.wait_success(host)
assert host.labels == {'foo': 'bar'}
event = client.create_external_host_event(hostId=host.id,
eventType='host.evacuate')
assert event.state == 'creating'
event = client.wait_success(event)
new_host = client.reload(new_host)
c = client.wait_success(c)
host = client.reload(host)
assert event.state == 'created'
assert host.state == 'inactive'
assert new_host.state == 'active'
def test_external_dns_event(super_client, new_context):
client, agent_client, host = from_context(new_context)
stack = client.create_environment(name=random_str())
stack = client.wait_success(stack)
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc1 = client.create_service(name=random_str(),
environmentId=stack.id,
launchConfig=launch_config)
svc1 = client.wait_success(svc1)
domain_name1 = "foo.com"
create_dns_event(client, agent_client, super_client,
new_context, svc1.name,
stack.name, domain_name1)
# wait for dns name to be updated
svc1 = client.reload(svc1)
assert svc1.fqdn == domain_name1
def create_dns_event(client, agent_client, super_client,
context, svc_name1,
stack_name, domain_name):
external_id = random_str()
event_type = "externalDnsEvent"
dns_event = {
'externalId': external_id,
'eventType': event_type,
"stackName": stack_name,
"serviceName": svc_name1,
"fqdn": domain_name
}
event = agent_client.create_external_dns_event(dns_event)
assert event.externalId == external_id
assert event.eventType == event_type
event = wait_for(lambda: event_wait(client, event))
assert event.accountId == context.project.id
assert event.reportedAccountId == context.agent.id
return event
def test_external_service_event_create(client, context, super_client):
agent_client = context.agent_client
env_external_id = random_str()
environment = {"name": "foo", "externalId": env_external_id}
svc_external_id = random_str()
svc_name = 'svc-name-%s' % svc_external_id
selector = 'foo=bar1'
template = {'foo': 'bar'}
svc_data = {
'selectorContainer': selector,
'kind': SERVICE_KIND,
'name': svc_name,
'externalId': svc_external_id,
'template': template,
}
event = agent_client.create_external_service_event(
eventType='service.create',
environment=environment,
externalId=svc_external_id,
service=svc_data,
)
event = wait_for(lambda: event_wait(client, event))
assert event is not None
svc = wait_for(lambda: service_wait(client, svc_external_id))
assert svc.externalId == svc_external_id
assert svc.name == svc_name
assert svc.kind == SERVICE_KIND
assert svc.selectorContainer == selector
assert svc.environmentId is not None
assert svc.template == template
envs = client.list_environment(externalId=env_external_id)
assert len(envs) == 1
assert envs[0].id == svc.environmentId
wait_for_condition(client, svc,
lambda x: x.state == 'active',
lambda x: 'State is: ' + x.state)
# Update
new_selector = 'newselector=foo'
svc_data = {
'selectorContainer': new_selector,
'kind': SERVICE_KIND,
'template': {'foo': 'bar'},
}
agent_client.create_external_service_event(
eventType='service.update',
environment=environment,
externalId=svc_external_id,
service=svc_data,
)
wait_for_condition(client, svc,
lambda x: x.selectorContainer == new_selector,
lambda x: 'Selector is: ' + x.selectorContainer)
# Delete
agent_client.create_external_service_event(
name=svc_name,
eventType='service.remove',
externalId=svc_external_id,
service={'kind': SERVICE_KIND},
)
wait_for_condition(client, svc,
lambda x: x.state == 'removed',
lambda x: 'State is: ' + x.state)
def test_external_stack_event_create(client, context, super_client):
agent_client = context.agent_client
env_external_id = random_str()
environment = {"name": env_external_id, "externalId": env_external_id,
"kind": "environment"}
env = client.create_environment(environment)
env = client.wait_success(env)
service = {
'kind': SERVICE_KIND,
}
event = agent_client.create_external_service_event(
eventType='stack.remove',
environment=environment,
externalId=env_external_id,
service=service,
)
event = wait_for(lambda: event_wait(client, event))
assert event is not None
wait_for(lambda:
len(client.list_environment(externalId=env_external_id)) == 0)
def service_wait(client, external_id):
services = client.list_kubernetes_service(externalId=external_id)
if len(services) and services[0].state == 'active':
return services[0]
def event_wait(client, event):
created = client.by_id('externalEvent', event.id)
if created is not None and created.state == 'created':
return created
|
|
#
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython import MetaData
from qtype import * # @UnusedWildImport
_MILIS_PER_DAY = 24 * 60 * 60 * 1000
_EPOCH_QMONTH = numpy.datetime64('2000-01', 'M')
_EPOCH_QDATE = numpy.datetime64('2000-01-01', 'D')
_EPOCH_QDATETIME = numpy.datetime64('2000-01-01T00:00:00.000', 'ms')
_EPOCH_TIMESTAMP = numpy.datetime64('2000-01-01T00:00:00', 'ns')
_QMONTH_NULL = qnull(QMONTH)
_QDATE_NULL = qnull(QDATE)
_QDATETIME_NULL = qnull(QDATETIME)
_QMINUTE_NULL = qnull(QMINUTE)
_QSECOND_NULL = qnull(QSECOND)
_QTIME_NULL = qnull(QTIME)
_QTIMESTAMP_NULL = qnull(QTIMESTAMP)
_QTIMESPAN_NULL = qnull(QTIMESPAN)
class QTemporal(object):
'''
Represents a q temporal value.
The :class:`.QTemporal` wraps `numpy.datetime64` or `numpy.timedelta64`
along with meta-information like qtype indicator.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
'''
def __init__(self, dt):
self._datetime = dt
def _meta_init(self, **meta):
self.meta = MetaData(**meta)
@property
def raw(self):
'''Return wrapped datetime object.
:returns: `numpy.datetime64` or `numpy.timedelta64` - wrapped datetime
'''
return self._datetime
def __str__(self):
return '%s [%s]' % (self._datetime, self.meta)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.meta.qtype == other.meta.qtype
and self._datetime == other._datetime)
def __ne__(self, other):
return not self.__eq__(other)
def qtemporal(dt, **meta):
'''Converts a `numpy.datetime64` or `numpy.timedelta64` to
:class:`.QTemporal` and enriches object instance with given meta data.
Examples:
>>> qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)
2001-01-01 [metadata(qtype=-14)]
>>> qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)
43499123 milliseconds [metadata(qtype=-19)]
>>> qtemporal(qnull(QDATETIME), qtype=QDATETIME)
nan [metadata(qtype=-15)]
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
:Kwargs:
- `qtype` (`integer`) - qtype indicator
:returns: `QTemporal` - wrapped datetime
'''
result = QTemporal(dt)
result._meta_init(**meta)
return result
def from_raw_qtemporal(raw, qtype):
'''
Converts raw numeric value to `numpy.datetime64` or `numpy.timedelta64`
instance.
Actual conversion applied to raw numeric value depends on `qtype` parameter.
:Parameters:
- `raw` (`integer`, `float`) - raw representation to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.datetime64` or `numpy.timedelta64` - converted datetime
'''
return _FROM_Q[qtype](raw)
def to_raw_qtemporal(dt, qtype):
'''
Converts datetime/timedelta instance to raw numeric value.
Actual conversion applied to datetime/timedelta instance depends on `qtype`
parameter.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime/timedelta
object to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `integer`, `float` - raw numeric value
'''
return _TO_Q[qtype](dt)
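# Illustrative usage sketch (not part of the original module): for the QDATE
# type the raw representation counts days since 2000-01-01, so:
#
#   >>> from_raw_qtemporal(366, QDATE)
#   numpy.datetime64('2001-01-01')
#   >>> to_raw_qtemporal(numpy.datetime64('2001-01-01', 'D'), QDATE)
#   366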
def array_from_raw_qtemporal(raw, qtype):
'''
Converts `numpy.array` containing raw q representation to ``datetime64``/``timedelta64``
array.
Examples:
>>> raw = numpy.array([366, 121, qnull(QDATE)])
>>> print array_from_raw_qtemporal(raw, qtype = QDATE)
['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `raw` (`numpy.array`) - numpy raw array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with ``datetime64``/``timedelta64``
:raises: `ValueError`
'''
if not isinstance(raw, numpy.ndarray):
raise ValueError('raw parameter is expected to be of type: numpy.ndarray. Was: %s' % type(raw))
qtype = -abs(qtype)
conversion = _FROM_RAW_LIST[qtype]
mask = raw == qnull(qtype)
dtype = PY_TYPE[qtype]
array = raw.astype(dtype) if dtype != raw.dtype else raw
array = conversion(array) if conversion else array
null = _NUMPY_NULL[qtype]
array = numpy.where(mask, null, array)
return array
def array_to_raw_qtemporal(array, qtype):
'''
Converts `numpy.array` containing ``datetime64``/``timedelta64`` to raw
q representation.
Examples:
>>> na_dt = numpy.arange('1999-01-01', '2005-12-31', dtype='datetime64[D]')
>>> print array_to_raw_qtemporal(na_dt, qtype = QDATE_LIST)
[-365 -364 -363 ..., 2188 2189 2190]
>>> array_to_raw_qtemporal(numpy.arange(-20, 30, dtype='int32'), qtype = QDATE_LIST)
Traceback (most recent call last):
...
ValueError: array.dtype is expected to be of type: datetime64 or timedelta64. Was: int32
:Parameters:
- `array` (`numpy.array`) - numpy datetime/timedelta array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with raw values
:raises: `ValueError`
'''
if not isinstance(array, numpy.ndarray):
raise ValueError('array parameter is expected to be of type: numpy.ndarray. Was: %s' % type(array))
    if array.dtype.type not in (numpy.datetime64, numpy.timedelta64):
raise ValueError('array.dtype is expected to be of type: datetime64 or timedelta64. Was: %s' % array.dtype)
qtype = -abs(qtype)
conversion = _TO_RAW_LIST[qtype]
raw = array.view(numpy.int64).view(numpy.ndarray)
mask = raw == numpy.int64(-2 ** 63)
raw = conversion(raw) if conversion else raw
null = qnull(qtype)
raw = numpy.where(mask, null, raw)
return raw
def _from_qmonth(raw):
if raw == _QMONTH_NULL:
return _NUMPY_NULL[QMONTH]
else:
return _EPOCH_QMONTH + numpy.timedelta64(int(raw), 'M')
def _to_qmonth(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QMONTH).astype(int) if not dt == _NUMPY_NULL[QMONTH] else _QMONTH_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdate(raw):
if raw == _QDATE_NULL:
return _NUMPY_NULL[QDATE]
else:
return _EPOCH_QDATE + numpy.timedelta64(int(raw), 'D')
def _to_qdate(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATE).astype(int) if not dt == _NUMPY_NULL[QDATE] else _QDATE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdatetime(raw):
if numpy.isnan(raw) or raw == _QDATETIME_NULL:
return _NUMPY_NULL[QDATETIME]
else:
return _EPOCH_QDATETIME + numpy.timedelta64(long(_MILIS_PER_DAY * raw), 'ms')
def _to_qdatetime(dt):
t_dt = type(dt)
if t_dt == numpy.float64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATETIME).astype(float) / _MILIS_PER_DAY if not dt == _NUMPY_NULL[QDATETIME] else _QDATETIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qminute(raw):
if raw == _QMINUTE_NULL:
return _NUMPY_NULL[QMINUTE]
else:
return numpy.timedelta64(int(raw), 'm')
def _to_qminute(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QMINUTE] else _QMINUTE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qsecond(raw):
if raw == _QSECOND_NULL:
return _NUMPY_NULL[QSECOND]
else:
return numpy.timedelta64(int(raw), 's')
def _to_qsecond(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QSECOND] else _QSECOND_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtime(raw):
if raw == _QTIME_NULL:
return _NUMPY_NULL[QTIME]
else:
return numpy.timedelta64(int(raw), 'ms')
def _to_qtime(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QTIME] else _QTIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimestamp(raw):
if raw == _QTIMESTAMP_NULL:
return _NUMPY_NULL[QTIMESTAMP]
else:
return _EPOCH_TIMESTAMP + numpy.timedelta64(long(raw), 'ns')
def _to_qtimestamp(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_TIMESTAMP).astype(long) if not dt == _NUMPY_NULL[QTIMESTAMP] else _QTIMESTAMP_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimespan(raw):
if raw == _QTIMESPAN_NULL:
return _NUMPY_NULL[QTIMESPAN]
else:
return numpy.timedelta64(long(raw), 'ns')
def _to_qtimespan(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.timedelta64:
        return dt.astype(long) if not dt == _NUMPY_NULL[QTIMESPAN] else _QTIMESPAN_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
_FROM_Q = {
QMONTH: _from_qmonth,
QDATE: _from_qdate,
QDATETIME: _from_qdatetime,
QMINUTE: _from_qminute,
QSECOND: _from_qsecond,
QTIME: _from_qtime,
QTIMESTAMP: _from_qtimestamp,
QTIMESPAN: _from_qtimespan,
}
_TO_Q = {
QMONTH: _to_qmonth,
QDATE: _to_qdate,
QDATETIME: _to_qdatetime,
QMINUTE: _to_qminute,
QSECOND: _to_qsecond,
QTIME: _to_qtime,
QTIMESTAMP: _to_qtimestamp,
QTIMESPAN: _to_qtimespan,
}
__EPOCH_QDATETIME_MS = _EPOCH_QDATETIME.astype(long)
__MILIS_PER_DAY_FLOAT = float(_MILIS_PER_DAY)
__EPOCH_QTIMESTAMP_NS = _EPOCH_TIMESTAMP.astype(long)
_TO_RAW_LIST = {
QMONTH: lambda a: (a - 360).astype(numpy.int32),
QDATE: lambda a: (a - 10957).astype(numpy.int32),
QDATETIME: lambda a: ((a - __EPOCH_QDATETIME_MS) / __MILIS_PER_DAY_FLOAT).astype(numpy.float64),
QMINUTE: lambda a: a.astype(numpy.int32),
QSECOND: lambda a: a.astype(numpy.int32),
QTIME: lambda a: a.astype(numpy.int32),
QTIMESTAMP: lambda a: a - __EPOCH_QTIMESTAMP_NS,
QTIMESPAN: None,
}
_FROM_RAW_LIST = {
QMONTH: lambda a: numpy.array((a + 360), dtype = 'datetime64[M]'),
QDATE: lambda a: numpy.array((a + 10957), dtype = 'datetime64[D]'),
QDATETIME: lambda a: numpy.array((a * _MILIS_PER_DAY + __EPOCH_QDATETIME_MS), dtype = 'datetime64[ms]'),
QMINUTE: lambda a: numpy.array(a, dtype = 'timedelta64[m]'),
QSECOND: lambda a: numpy.array(a, dtype = 'timedelta64[s]'),
QTIME: lambda a: numpy.array(a, dtype = 'timedelta64[ms]'),
QTIMESTAMP: lambda a: numpy.array((a + __EPOCH_QTIMESTAMP_NS), dtype = 'datetime64[ns]'),
QTIMESPAN: lambda a: numpy.array(a, dtype = 'timedelta64[ns]'),
}
_NUMPY_NULL = {
QMONTH: numpy.datetime64('NaT', 'M'),
QDATE: numpy.datetime64('NaT', 'D'),
QDATETIME: numpy.datetime64('NaT', 'ms'),
QMINUTE: numpy.timedelta64('NaT', 'm'),
QSECOND: numpy.timedelta64('NaT', 's'),
QTIME: numpy.timedelta64('NaT', 'ms'),
QTIMESTAMP: numpy.datetime64('NaT', 'ns'),
QTIMESPAN: numpy.timedelta64('NaT', 'ns'),
}
|
|
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# this program simply creates the configuration file needed by the agent
# it is assumed that all of the directories have already been created with
# the proper permissions
from __future__ import print_function
import argparse
import configparser
import glob
import os
import shutil
import sys
import textwrap
import urllib.parse
import dcm.agent
import dcm.agent.cloudmetadata as cloudmetadata
import dcm.agent.config as config
import dcm.agent.utils as agent_utils
# below are the variables with no defaults that must be determined
cloud_choice = None
g_user_env_str = "DCM_USER"
g_basedir_env_str = "DCM_BASEDIR"
_g_bundled_cert_file = "/opt/dcm-agent/embedded/ssl/certs/cacert.pem"
_g_cert_warning = """
*****************************************************************************
WARNING
-------
The certificate file %s is bundled with the agent and must be maintained
manually. There is no daemon running
that will update the certificates in it or enforce revocation policies.
*****************************************************************************
""" % _g_bundled_cert_file
cloud_choices = [i for i in
dir(cloudmetadata.CLOUD_TYPES) if not i.startswith("_")]
def setup_command_line_parser():
parser = argparse.ArgumentParser(
description='DCM Agent Installer for linux.')
parser.add_argument("--cloud", "-c", metavar="{Amazon, etc...}",
dest="cloud",
help="The cloud where this virtual machine will be "
"run. Options: %s" % ", ".join(cloud_choices))
parser.add_argument("--url", "-u", dest="url",
help="The location of the dcm web socket listener")
parser.add_argument("--verbose", "-v", dest="verbose",
action='store_true',
default=False,
help="Increase the amount of output produced by the "
"script.")
parser.add_argument("--initial", "-I", dest="initial",
action='store_true',
default=False,
help=argparse.SUPPRESS)
parser.add_argument("--interactive", "-i", dest="interactive",
action='store_true',
default=False,
help="Run an interactive session where questions "
"will be asked and answered vi stdio.")
parser.add_argument("--base-path", "-p",
dest="base_path",
help="The path to enstratius")
parser.add_argument("--mount-point", "-m",
dest="mount_path",
help="The path to mount point")
parser.add_argument("--on-boot", "-B",
dest="on_boot",
action='store_true',
default=False,
help="Setup the agent to start when the VM boots")
parser.add_argument("--reload-conf", "-r",
dest="reload",
help="The previous config file that will be used "
"to populate defaults.")
parser.add_argument("--rewrite-logging-plugin", "-R",
dest="rewrite_logging_plugin",
action="store_true",
default=False,
help="When reconfiguring the agent with -r option "
"You can additionally specifiy this option to"
"force the overwrite of plugin and logging configs.")
parser.add_argument("--temp-path", "-t",
dest="temp_path",
help="The temp path")
parser.add_argument("--user", "-U",
dest="user",
help="The system user that will run the agent.")
parser.add_argument("--connection-type", "-C",
dest="con_type",
help="The type of connection that will be formed "
"with the agent manager.")
parser.add_argument("--logfile", "-l", dest="logfile")
parser.add_argument("--loglevel", "-L",
dest="loglevel",
default="INFO",
help="The level of logging for the agent.")
parser.add_argument("--install-extras",
dest="install_extras",
action='store_true',
                        help='Install an additional set of packages (puppet '
                             'etc.) now, which are needed for certain actions. '
'If this is not set at boot time the packages '
'will still be downloaded and installed on '
'demand')
parser.add_argument("--extra-package-location",
dest="extra_package_location",
default="https://linux-stable-agent.enstratius.com/",
help="The URL of the dcm-agent-extras package which "
"contains additional software dependencies "
"needed for some commands.")
parser.add_argument("--package-name",
dest="package_name",
default=None,
help="Name of the extra package to be installed.")
parser.add_argument("--chef-client", "-o", dest="chef_client",
action='store_true',
default=False,
help="Install the chef client")
parser.add_argument("--chef-client-version", dest="chef_client_version",
default="11.16.4",
help="Version of the chef client to be installed.")
parser.add_argument("--allow-unknown-certs", "-Z",
dest="allow_unknown_certs",
action='store_true',
default=False,
help="Disable cert validation. In general this is a"
"bad idea but is very useful for testing.")
parser.add_argument("--cacert-file", "-A", dest="cacert_file",
default=None)
parser.add_argument("--intrusion-detection-ossec", "-d",
dest="intrusion_detection_ossec",
default="False",
help="Boolean to install and start ossec. In addition the agent will process alerts."
" The value must be one of [yes,no,true,false]")
parser.add_argument("--ids-alert-threshold", "-T",
dest="ids_alert_threshold",
default=10,
help="The value at which the agent will not send alerts to DCM.")
return parser
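# Illustrative example, not part of the original source: main() below accepts
# the same flags defined by this parser, so the script can be driven
# non-interactively. The URL, user and path shown here are placeholders.
#
#   rc = main(["--cloud", "Amazon",
#              "--url", "wss://dcm.example.com/agentManager",
#              "--user", "dcm",
#              "--base-path", "/dcm",
#              "--on-boot"])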
def _get_input(prompt):
return input(prompt)
def select_cloud(default=cloudmetadata.CLOUD_TYPES.Amazon):
"""
:param default:
:return:
"""
for i, cloud_name in enumerate(cloud_choices):
col = "%2d) %-13s" % (i, cloud_name)
print(col)
cloud = None
while cloud is None:
input_str = _get_input("Select your cloud (%s): " % default)
input_str = input_str.strip().lower()
if not input_str:
input_str = default.lower()
if input_str in [i.lower() for i in cloud_choices]:
return input_str
try:
ndx = int(input_str)
cloud = cloud_choices[ndx]
        except (ValueError, IndexError):
            print("%s is not a valid choice." % input_str)
return cloud
def guess_default_cloud(conf_d):
(h, cloud_name) = conf_d["cloud"]["type"]
if cloud_name != cloudmetadata.CLOUD_TYPES.UNKNOWN:
return
conf = config.AgentConfig([])
name = cloudmetadata.guess_effective_cloud(conf)
if name is None:
raise Exception("Cloud %s is not a known type." % cloud_name)
print("The detected cloud is " + name)
conf_d["cloud"]["type"] = (h, name)
def normalize_cloud_name(conf_d):
(h, cloud) = conf_d["cloud"]["type"]
name = cloudmetadata.normalize_cloud_name(cloud)
if name is None:
raise Exception("Cloud %s is not a known type." % cloud)
conf_d["cloud"]["type"] = (h, name)
def pick_meta_data(conf_d):
(_, cloud) = conf_d["cloud"]["type"]
if cloud == cloudmetadata.CLOUD_TYPES.Amazon:
mu = "http://169.254.169.254/latest/meta-data/"
elif cloud == cloudmetadata.CLOUD_TYPES.OpenStack:
mu = "http://169.254.169.254/openstack/2012-08-10/meta_data.json"
elif cloud == cloudmetadata.CLOUD_TYPES.Google:
mu = "http://metadata.google.internal/computeMetadata/v1"
elif cloud == cloudmetadata.CLOUD_TYPES.DigitalOcean:
mu = "http://169.254.169.254/metadata/v1"
elif cloud == cloudmetadata.CLOUD_TYPES.CloudStack:
mu = None
elif cloud == cloudmetadata.CLOUD_TYPES.CloudStack3:
mu = None
else:
return None
(h, _) = conf_d["cloud"]["metadata_url"]
conf_d["cloud"]["metadata_url"] = (h, mu)
def get_default_conf_dict():
conf_dict = {}
option_list = config.build_options_list()
for c in option_list:
if c.hidden:
continue
s_d = {}
if c.section in conf_dict:
s_d = conf_dict[c.section]
else:
conf_dict[c.section] = s_d
s_d[c.name] = (c.get_help(), c.get_default())
return conf_dict
def update_from_config_file(conf_file, conf_dict):
# pull from the existing config file
parser = configparser.ConfigParser()
parser.read([conf_file])
for s in parser.sections():
if s in conf_dict:
sd = conf_dict[s]
else:
sd = {}
conf_dict[s] = sd
items_list = parser.items(s)
for (key, value) in items_list:
help_str = None
            if key in sd:
(help_str, _) = sd[key]
sd[key] = (help_str, value)
def write_conf_file(dest_filename, conf_dict):
with open(dest_filename, "w") as fptr:
for section_name in conf_dict:
sd = conf_dict[section_name]
fptr.write("[%s]%s" % (section_name, os.linesep))
for item_name in sd:
(help_str, value) = sd[item_name]
if help_str:
help_lines = textwrap.wrap(help_str, 79)
for h in help_lines:
fptr.write("# %s%s" % (h, os.linesep))
if value is None:
fptr.write("#%s=" % item_name)
else:
if type(value) == list:
value = str(value)[1:-1]
fptr.write("%s=%s" % (item_name, str(value)))
fptr.write(os.linesep)
fptr.write(os.linesep)
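# Illustrative sketch, not part of the original source: write_conf_file()
# expects a dict of the shape {section: {item: (help_text, value)}}, the same
# shape produced by get_default_conf_dict() above. The values below are
# placeholders and this helper is never called by the installer itself.
def _example_write_conf_file(dest_filename):
    example_conf = {
        "connection": {
            "agentmanager_url": ("The agent manager contact string",
                                 "wss://dcm.example.com/agentManager"),
            "allow_unknown_certs": (None, False),
        },
    }
    write_conf_file(dest_filename, example_conf)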
def make_dirs(conf_d):
(_, base_path) = conf_d["storage"]["base_dir"]
dirs_to_make = [
(base_path, 0o755),
(os.path.join(base_path, "bin"), 0o750),
(conf_d["storage"]["script_dir"][1], 0o750),
(os.path.join(base_path, "etc"), 0o700),
(os.path.join(base_path, "logs"), 0o700),
(os.path.join(base_path, "home"), 0o750),
(os.path.join(base_path, "secure"), 0o700),
(conf_d["storage"]["temppath"][1], 0o1777),
]
for (directory, mod) in dirs_to_make:
try:
os.mkdir(directory)
except OSError as ex:
if ex.errno != 17:
raise
os.chmod(directory, mod)
print("...Done.")
def do_set_owner_and_perms(conf_d):
(_, script_dir) = conf_d["storage"]["script_dir"]
(_, base_path) = conf_d["storage"]["base_dir"]
(_, user) = conf_d["system"]["user"]
for f in os.listdir(script_dir):
os.chmod(os.path.join(script_dir, f), 0o550)
with open(os.path.join(script_dir, "variables.sh"), "w") as fptr:
fptr.write("DCM_USER=%s" % user)
fptr.write(os.linesep)
fptr.write("DCM_BASEDIR=%s" % base_path)
fptr.write(os.linesep)
fptr.write(os.linesep)
print("Changing ownership to %s:%s" % (user, user))
os.system("chown -R %s:%s %s" % (user, user, base_path))
def merge_opts(conf_d, opts):
map_opts_to_conf = {
"cloud": ("cloud", "type"),
"user": ("system", "user"),
"url": ("connection", "agentmanager_url"),
"base_path": ("storage", "base_dir"),
"temp_path": ("storage", "temppath"),
"con_type": ("connection", "type"),
"mount_path": ("storage", "mountpoint"),
"extra_package_location": ("extra", "location"),
"package_name": ("extra", "package_name"),
"allow_unknown_certs": ("connection", "allow_unknown_certs"),
"cacert_file": ("connection", "ca_cert"),
"intrusion_detection_ossec": ("intrusion_detection", "ossec"),
"ids_alert_threshold": ("intrusion_detection", "alert_threshold"),
"chef_client_version": ("configuration_management", "chef_client_version")
}
for opts_name in map_opts_to_conf:
(s, i) = map_opts_to_conf[opts_name]
if s not in conf_d:
conf_d[s] = {}
sd = conf_d[s]
v = getattr(opts, opts_name, None)
h = None
if i in sd:
(h, _) = sd[i]
if v is not None:
sd[i] = (h, v)
def do_plugin_conf(conf_d):
(_, base_dir) = conf_d["storage"]["base_dir"]
(_, dest_plugin_path) = conf_d["plugin"]["configfile"]
root_dir = dcm.agent.get_root_location()
    src_plugin_path = os.path.join(root_dir, "etc", "plugin.conf")
    shutil.copy(src_plugin_path, dest_plugin_path)
def do_logging_conf(conf_d, opts):
(_, base_dir) = conf_d["storage"]["base_dir"]
(_, dest_logging_path) = conf_d["logging"]["configfile"]
root_dir = dcm.agent.get_root_location()
src_logging_path = os.path.join(root_dir, "etc", "logging.yaml")
shutil.copy(src_logging_path, dest_logging_path)
if opts.logfile is None:
log_file = os.path.join(base_dir, "logs", "agent.log")
else:
log_file = opts.logfile
with open(src_logging_path, "r") as fptr:
lines = fptr.readlines()
with open(dest_logging_path, "w") as fptr:
for line in lines:
line = line.replace("@LOGFILE_PATH@", log_file)
line = line.replace("@LOG_LEVEL@", opts.loglevel)
line = line.replace("@DCM_USER@", conf_d["system"]["user"][1])
fptr.write(line)
def copy_scripts(conf_d):
(h, dest_dir) = conf_d["storage"]["script_dir"]
src_script_dir = os.path.join(
config.get_python_script_dir(), 'common-linux')
for s in glob.glob("%s/*" % os.path.abspath(src_script_dir)):
if os.path.isfile(s):
d = os.path.basename(s)
shutil.copy(s, os.path.join(dest_dir, d))
def update_relative_paths(conf_d):
(_, base_dir) = conf_d["storage"]["base_dir"]
def _val_update(section_name, item_name, default_val):
h = ""
try:
(h, val) = conf_d[section_name][item_name]
        except KeyError:
val = None
if val is None:
val = os.path.join(base_dir, default_val)
conf_d[section_name][item_name] = (h, val)
_val_update("logging", "configfile", "etc/logging.yaml")
_val_update("plugin", "configfile", "etc/plugin.conf")
_val_update("storage", "script_dir", "bin")
def get_url(default=None):
if not default:
default = "wss://dcm.enstratius.com/agentManager"
print("Please enter the contact string of the agent manager (%s)"
% default)
url = sys.stdin.readline().strip()
if not url:
return default
# validate
try:
up = urllib.parse.urlparse(url)
if up.port is not None:
int(up.port)
except Exception:
raise Exception("The agent manager contact %s is not a valid url"
% url)
allowed_schemes = ["ws", "wss"]
if up.scheme not in allowed_schemes:
raise Exception("The url %s does not consist of an allowed scheme. "
"Only the follow schemes are allows %s"
% (url, str(allowed_schemes)))
return url
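# Illustrative examples, not part of the original source: get_url() accepts
# only web-socket URLs, e.g.
#   "wss://dcm.example.com/agentManager"  -> returned as-is
#   "https://dcm.example.com/"            -> raises Exception (scheme not ws/wss)
# An empty line at the prompt falls back to the default shown above.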
def enable_start_agent(opts):
ask = opts.interactive
on_boot = opts.on_boot
if on_boot:
ask = False
if ask:
print("Would you like to start the agent on boot? (Y/n)")
ans = sys.stdin.readline().strip()
on_boot = ans == "" or ans.lower() == "y" or ans.lower() == "yes"
if on_boot:
if os.path.exists("/sbin/insserv"):
os.system("/sbin/insserv dcm-agent")
elif os.path.exists("/usr/sbin/update-rc.d"):
os.system("update-rc.d dcm-agent defaults")
elif os.path.exists("/sbin/chkconfig"):
os.system("/sbin/chkconfig --add dcm-agent")
# TODO other platforms
def guess_cacert_location():
possible_locations = ["/etc/ssl/certs/ca-bundle.crt",
"/etc/ssl/certs/ca-certificates.crt",
_g_bundled_cert_file]
for l in possible_locations:
if os.path.exists(l):
return l
def cert_check():
print("Would you like to disable certificate checking? (not recommended) (y/N)")
ans = sys.stdin.readline().strip()
    return ans.lower() in ("y", "yes")
def interactive_cert_path():
default_path = guess_cacert_location()
print("Please select a default path for certificate file (%s)" %
default_path)
ans = sys.stdin.readline().strip()
if not ans:
ans = default_path
return ans
def do_interactive(opts, conf_d):
if not opts.interactive:
return
(h, cloud_type) = conf_d["cloud"]["type"]
cloud_type = select_cloud(default=cloud_type)
conf_d["cloud"]["type"] = (h, cloud_type)
(h, url) = conf_d["connection"]["agentmanager_url"]
url = get_url(default=url)
conf_d["connection"]["agentmanager_url"] = (h, url)
check_certs = cert_check()
conf_d["connection"]["allow_unknown_certs"] = (h, check_certs)
(h, cert_path) = conf_d["connection"]["ca_cert"]
if not check_certs and cert_path is None:
cert_path = interactive_cert_path()
conf_d["connection"]["ca_cert"] = (h, cert_path)
def validate_cacerts(conf_d):
(h, val) = conf_d["connection"]["allow_unknown_certs"]
if val:
return
(h, val) = conf_d["connection"]["ca_cert"]
if val is None:
val = guess_cacert_location()
if val is None:
raise Exception("If the unknown certificates are not allowed you must specify a cert file with the --cacert-file option.")
conf_d["connection"]["ca_cert"] = (h, val)
if val == _g_bundled_cert_file:
print(_g_cert_warning)
def gather_values(opts):
# get the default values based on the defaults set in the config object
conf_d = get_default_conf_dict()
# if we are reloading from a file override the defaults with what is in
# that file
if opts.reload:
update_from_config_file(opts.reload, conf_d)
# override any values passed in via options
merge_opts(conf_d, opts)
# set defaults for relative paths
update_relative_paths(conf_d)
return conf_d
def cleanup_previous_install(conf_d):
# delete old DB if it exists
(_, db_file) = conf_d['storage']['dbfile']
if db_file and os.path.exists(db_file):
os.remove(db_file)
def main(argv=sys.argv[1:]):
parser = setup_command_line_parser()
opts = parser.parse_args(args=argv)
opts.loglevel = opts.loglevel.upper()
if opts.loglevel not in ["ERROR", "WARN", "INFO", "DEBUG"]:
print("WARNING: %s is an invalid log level. Using INFO"
% opts.loglevel)
opts.loglevel = "INFO"
opts.intrusion_detection_ossec = opts.intrusion_detection_ossec.lower()
opts.intrusion_detection_ossec =\
opts.intrusion_detection_ossec in ['y', 'yes', 't', 'true']
conf_d = gather_values(opts)
if not opts.initial:
guess_default_cloud(conf_d)
do_interactive(opts, conf_d)
normalize_cloud_name(conf_d)
pick_meta_data(conf_d)
validate_cacerts(conf_d)
# before writing anything make sure that all the needed values are
# set
if not opts.initial:
if not conf_d["system"]["user"]:
raise Exception("You must set the user name that will run "
"this service.")
if not conf_d["storage"]["base_dir"]:
raise Exception("You must set the base dir for this service "
"installation.")
try:
make_dirs(conf_d)
(_, base_dir) = conf_d["storage"]["base_dir"]
if not opts.reload:
copy_scripts(conf_d)
do_plugin_conf(conf_d)
do_logging_conf(conf_d, opts)
else:
if not os.path.isfile(os.path.join(base_dir, "etc", "plugin.conf")) or opts.rewrite_logging_plugin:
do_plugin_conf(conf_d)
if not os.path.isfile(os.path.join(base_dir, "etc", "logging.yaml")) or opts.rewrite_logging_plugin:
do_logging_conf(conf_d, opts)
cleanup_previous_install(conf_d)
conf_file_name = os.path.join(base_dir, "etc", "agent.conf")
write_conf_file(conf_file_name, conf_d)
do_set_owner_and_perms(conf_d)
if not opts.initial:
enable_start_agent(opts)
conf = config.AgentConfig([conf_file_name])
if opts.install_extras:
if opts.package_name:
agent_utils.install_extras(conf, package=opts.package_name)
else:
agent_utils.install_extras(conf)
if opts.intrusion_detection_ossec and not agent_utils.ossec_installed(conf):
# call out to install ossec
agent_utils.install_ossec(conf)
except Exception as ex:
print(str(ex), file=sys.stderr)
if opts.verbose:
raise
return 1
return 0
if __name__ == "__main__":
rc = main()
sys.exit(rc)
|
|
"""
Plots Arctic daily sea ice extent from June 2002-present using JAXA metadata
Website : https://ads.nipr.ac.jp/vishop/vishop-extent.html
Author : Zachary M. Labe
Date : 15 May 2016
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import datetime
import urllib.request
import urllib as UL
### Directory and time
directoryfigure = './Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day-1)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
### Load url
url = 'https://ads.nipr.ac.jp/vishop.ver1/data/graph/plot_extent_n_v2.csv'
### Read file
raw_data = urllib.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=0, delimiter=",")
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
month = dataset[1:,0] # 1-12, nan as month[0]
day = dataset[1:,1] # 1-31, nan as day[0]
mean1980 = dataset[1:,2] # km^2, nan as mean1980[0]
mean1990 = dataset[1:,3] # km^2, nan as mean1990[0]
mean2000 = dataset[1:,4] # km^2, nan as mean2000[0]
years = dataset[1:,5:]
doy = np.arange(0,len(day),1)
### Change units to million km^2
years = years/1e6
### Recent day of current year
currentyear = years[:,-1]
lastday = now.timetuple().tm_yday -1
currentice = currentyear[lastday]
currentanom = currentice - (mean1980[lastday]/1e6)
# Leap year
currentyear[59] = currentyear[58]
### Changes in the last day and week
weekchange = currentice - currentyear[lastday-7]
daychange = currentice - currentyear[lastday-1]
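# Note (illustrative, not from the original script): after the /1e6 conversion
# above, currentice and the change values are in millions of km^2, e.g. 12.3
# corresponds to 12,300,000 km^2; the printouts at the bottom of the script
# multiply by 1e6 to report plain km^2 again.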
###############################################################################
###############################################################################
###############################################################################
### Plot figure
matplotlib.rc('savefig', facecolor='black')
matplotlib.rc('axes', edgecolor='white')
matplotlib.rc('xtick', color='white')
matplotlib.rc('ytick', color='white')
matplotlib.rc('axes', labelcolor='white')
matplotlib.rc('axes', facecolor='black')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
fig = plt.figure()
ax = plt.subplot(111)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 0))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
oldaverage = currentyear.copy()
oldaverage[lastday:] = currentyear[lastday]
### 2000s average
average2000s = mean2000.copy()
average2000s[lastday:] = mean2000[lastday]
average2000s = average2000s/1e6
oldmin = np.where(mean2000 == np.nanmin(mean2000))[0]
### 1990s average
average1990s = mean1990.copy()
average1990s[lastday:] = mean1990[lastday]
average1990s = average1990s/1e6
### 1980s average
average1980s = mean1980.copy()
average1980s[lastday:] = mean1980[lastday]
average1980s = average1980s/1e6
difference = (oldmin - lastday)[0]
### Are we below decadal climatological min?
if (currentyear[lastday]*1e6) < np.nanmin(mean1980):
    print(True, '1980')
if (currentyear[lastday]*1e6) < np.nanmin(mean1990):
print(True, '1990')
if (currentyear[lastday]*1e6) < np.nanmin(mean2000):
print(True, '2000')
### Calculate record low SIE
recordlow = np.empty((years.shape[0]))
for i in range(years.shape[0]):
if years[i,-1] == np.nanmin(years[i,:]):
recordlow[i] = 1.
else:
recordlow[i] = 0.
### Begin plot
plt.plot(doy,years[:,:],color='w',linewidth=0.15,
linestyle='-',alpha=0.7)
bar = ax.plot(doy,currentyear,linewidth=2.9,zorder=3,
color='darkorange',)
plt.scatter(doy[lastday],currentyear[lastday],
s=20,color='darkorange',zorder=4)
plt.scatter(doy[lastday],mean2000[lastday]/1e6,
s=20,color='dodgerblue',zorder=11)
plt.scatter(doy[lastday],mean1990[lastday]/1e6,
s=20,color='c',zorder=11)
plt.scatter(doy[lastday],mean1980[lastday]/1e6,
s=20,color='darkmagenta',zorder=11)
plt.plot(doy,mean1980/1e6,linewidth=1.8,linestyle='-',
color='darkmagenta',label=r'1980s Mean')
plt.plot(doy,mean1990/1e6,linewidth=1.8,linestyle='-',
color='c',label=r'1990s Mean')
plt.plot(doy,mean2000/1e6,linewidth=1.8,linestyle='-',
color='dodgerblue',label=r'2000s Mean')
plt.plot(oldaverage,color='darkorange',linestyle=':',linewidth=2.8,zorder=5)
plt.plot(average2000s,color='dodgerblue',linestyle=':',linewidth=1.8,zorder=11)
plt.plot(average1990s,color='c',linestyle=':',linewidth=1.8,zorder=11)
plt.plot(average1980s,color='darkmagenta',linestyle=':',linewidth=1.8,zorder=11)
### Define date
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
strmonth = xlabels[int(currentmn)-1]
asof = strmonth + ' ' + currentdy + ', ' + currentyr
### Add additional information to the plot
xcord = 109
ycord = 9.4
if recordlow[lastday] == 1.0:
plt.text(xcord + 2,ycord,r'\textbf{[*Record Low*]}',fontsize=11,
rotation='horizontal',ha='left',color='aqua')
xcord = lastday - 5.5
ycord = round(currentice)-0.8
plt.text(31.4,16.9,r'\textbf{DATA:} JAXA 2002-2017 (Arctic Data archive System, NIPR)',
fontsize=5,rotation='horizontal',ha='left',color='w',alpha=0.6)
plt.text(31.4,16.7,r'\textbf{SOURCE:} https://ads.nipr.ac.jp/vishop/vishop-extent.html',
fontsize=5,rotation='horizontal',ha='left',color='w',alpha=0.6)
plt.text(31.4,16.5,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
fontsize=5,rotation='horizontal',ha='left',color='w',alpha=0.6)
plt.text(doy[lastday]+8,currentyear[lastday]-0.4,r'\textbf{$\bf{\longrightarrow}$}',
fontsize=18,rotation=140,ha='right',color='darkorange')
plt.text(122,currentyear[lastday]-0.2,r'\textbf{CURRENT}',
fontsize=9.5,rotation='horizontal',ha='left',
color='darkorange',alpha=1)
plt.text(122,mean2000[lastday]/1e6-0.2,r'2000s',
fontsize=10,rotation='horizontal',ha='left',
color='dodgerblue')
plt.text(122,mean1990[lastday]/1e6-0.2,r'1990s',
fontsize=10,rotation='horizontal',ha='left',
color='c')
plt.text(122,mean1980[lastday]/1e6-0.05,r'1980s',
fontsize=10,rotation='horizontal',ha='left',
color='darkmagenta')
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.tick_params(axis='both', direction='out',length=5.5,width=2,
which='major',pad=7)
plt.ylabel(r'\textbf{Extent [$\bf{\times 10^{6}}$\ \textbf{km}$\bf{^2}$]}',
fontsize=15,alpha=0.6)
l = plt.legend(shadow=False,fontsize=8.5,loc='lower left',
bbox_to_anchor=(0.768, -0.025),fancybox=True,ncol=1,
frameon=False)
for text in l.get_texts():
text.set_color('w')
text.set_alpha(0.6)
plt.xticks(np.arange(0,366,30.4),xlabels,rotation=0,fontsize=11)
ylabels = list(map(str, np.arange(1, 18, 1)))
plt.yticks(np.arange(1,18,1),ylabels,fontsize=13)
plt.ylim([10,17])
plt.xlim([30.4,121.59])
fig.suptitle(r'\textbf{ARCTIC SEA ICE}',fontsize=28,color='w',alpha=0.6)
plt.savefig(directoryfigure + 'JAXA_seaice_means_xe5.png',dpi=900)
### Print additional information
print('\n')
print('----JAXA Sea Ice Change----')
print('Day 5 = %s km^2' % ((currentyear[lastday-4] - currentyear[lastday-5])*1e6))
print('Day 4 = %s km^2' % ((currentyear[lastday-3] - currentyear[lastday-4])*1e6))
print('Day 3 = %s km^2' % ((currentyear[lastday-2] - currentyear[lastday-3])*1e6))
print('Day 2 = %s km^2' % ((currentyear[lastday-1] - currentyear[lastday-2])*1e6))
print('Day 1 = %s km^2' % ((currentyear[lastday] - currentyear[lastday-1])*1e6))
print('\n' 'Total 5-day Change = %s km^2 \n' % ((currentyear[lastday]-currentyear[lastday-5])*1e6))
print('2017-1980 = %s km^2' % ((currentyear[lastday]*1e6) - mean1980[lastday]))
print('2017-1990 = %s km^2' % ((currentyear[lastday]*1e6) - mean1990[lastday]))
print('2017-2000 = %s km^2' % ((currentyear[lastday]*1e6) - mean2000[lastday]))
print('\n')
|
|
"""Tests for vumi.persist.txriak_manager."""
from twisted.internet.defer import inlineCallbacks
from vumi.persist.model import Manager
from vumi.tests.helpers import VumiTestCase, import_skip
class DummyModel(object):
bucket = "dummy_model"
VERSION = None
MIGRATORS = None
def __init__(self, manager, key, _riak_object=None):
self.manager = manager
self.key = key
self._riak_object = _riak_object
@classmethod
def load(cls, manager, key, result=None):
return manager.load(cls, key, result=result)
def set_riak(self, riak_object):
self._riak_object = riak_object
def get_data(self):
return self._riak_object.get_data()
def set_data(self, data):
self._riak_object.set_data(data)
def add_index(self, index_name, key):
self._riak_object.add_index(index_name, key)
def get_link_key(link):
return link[1]
def unrepr_string(text):
if text.startswith("'"):
# Strip and unescape single quotes
return text[1:-1].replace("\\'", "'")
if text.startswith('"'):
# Strip and unescape double quotes
return text[1:-1].replace('\\"', '"')
# Nothing to strip.
return text
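# Illustrative examples, not part of the original tests, of what
# unrepr_string() does with repr()-style strings:
#   unrepr_string("'don\\'t'")       -> "don't"
#   unrepr_string('"say \\"hi\\""')  -> 'say "hi"'
#   unrepr_string("plain")           -> "plain"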
class CommonRiakManagerTests(object):
"""Common tests for Riak managers.
Tests assume self.manager is set to a suitable Riak
manager.
"""
def mkdummy(self, key, data=None, dummy_class=DummyModel):
dummy = dummy_class(self.manager, key)
dummy.set_riak(self.manager.riak_object(dummy, key))
if data is not None:
dummy.set_data(data)
return dummy
def test_from_config(self):
manager_cls = self.manager.__class__
manager = manager_cls.from_config({'bucket_prefix': 'test.'})
self.assertEqual(manager.__class__, manager_cls)
self.assertEqual(manager.load_bunch_size,
manager.DEFAULT_LOAD_BUNCH_SIZE)
self.assertEqual(manager.mapreduce_timeout,
manager.DEFAULT_MAPREDUCE_TIMEOUT)
def test_from_config_with_bunch_size(self):
manager_cls = self.manager.__class__
manager = manager_cls.from_config({'bucket_prefix': 'test.',
'load_bunch_size': 10,
})
self.assertEqual(manager.load_bunch_size, 10)
def test_from_config_with_mapreduce_timeout(self):
manager_cls = self.manager.__class__
manager = manager_cls.from_config({'bucket_prefix': 'test.',
'mapreduce_timeout': 1000,
})
self.assertEqual(manager.mapreduce_timeout, 1000)
def test_from_config_with_store_versions(self):
manager_cls = self.manager.__class__
manager = manager_cls.from_config({
'bucket_prefix': 'test.',
'store_versions': {
'foo.Foo': 3,
'bar.Bar': None,
},
})
self.assertEqual(manager.store_versions, {
'foo.Foo': 3,
'bar.Bar': None,
})
def test_sub_manager(self):
sub_manager = self.manager.sub_manager("foo.")
self.assertEqual(sub_manager.client, self.manager.client)
self.assertEqual(sub_manager.bucket_prefix, 'test.foo.')
def test_bucket_name_on_modelcls(self):
dummy = self.mkdummy("bar")
bucket_name = self.manager.bucket_name(type(dummy))
self.assertEqual(bucket_name, "test.dummy_model")
def test_bucket_name_on_instance(self):
dummy = self.mkdummy("bar")
bucket_name = self.manager.bucket_name(dummy)
self.assertEqual(bucket_name, "test.dummy_model")
def test_bucket_for_modelcls(self):
dummy_cls = type(self.mkdummy("foo"))
bucket1 = self.manager.bucket_for_modelcls(dummy_cls)
bucket2 = self.manager.bucket_for_modelcls(dummy_cls)
self.assertEqual(id(bucket1), id(bucket2))
self.assertEqual(bucket1.get_name(), "test.dummy_model")
def test_riak_object(self):
dummy = DummyModel(self.manager, "foo")
riak_object = self.manager.riak_object(dummy, "foo")
self.assertEqual(riak_object.get_data(), {'$VERSION': None})
self.assertEqual(riak_object.get_content_type(), "application/json")
self.assertEqual(
riak_object.get_bucket().get_name(), "test.dummy_model")
self.assertEqual(riak_object.key, "foo")
@Manager.calls_manager
def test_store_and_load(self):
dummy1 = self.mkdummy("foo", {"a": 1})
result1 = yield self.manager.store(dummy1)
self.assertEqual(dummy1, result1)
dummy2 = yield self.manager.load(DummyModel, "foo")
self.assertEqual(dummy2.get_data(), {"a": 1})
@Manager.calls_manager
def test_delete(self):
dummy1 = self.mkdummy("foo", {"a": 1})
yield self.manager.store(dummy1)
dummy2 = yield self.manager.load(DummyModel, "foo")
yield self.manager.delete(dummy2)
dummy3 = yield self.manager.load(DummyModel, "foo")
self.assertEqual(dummy3, None)
@Manager.calls_manager
def test_load_missing(self):
dummy = self.mkdummy("unknown")
result = yield self.manager.load(DummyModel, dummy.key)
self.assertEqual(result, None)
@Manager.calls_manager
def test_load_all_bunches(self):
yield self.manager.store(self.mkdummy("foo", {"a": 0}))
yield self.manager.store(self.mkdummy("bar", {"a": 1}))
yield self.manager.store(self.mkdummy("baz", {"a": 2}))
self.manager.load_bunch_size = load_bunch_size = 2
keys = ["foo", "unknown", "bar", "baz"]
result_data = []
for result_bunch in self.manager.load_all_bunches(DummyModel, keys):
bunch = yield result_bunch
self.assertTrue(len(bunch) <= load_bunch_size)
result_data.extend(result.get_data() for result in bunch)
result_data.sort(key=lambda d: d["a"])
self.assertEqual(result_data, [{"a": 0}, {"a": 1}, {"a": 2}])
@Manager.calls_manager
def test_run_riak_map_reduce(self):
dummies = [self.mkdummy(str(i), {"a": i}) for i in range(4)]
for dummy in dummies:
dummy.add_index('test_index_bin', 'test_key')
yield self.manager.store(dummy)
mr = self.manager.riak_map_reduce()
mr.index('test.dummy_model', 'test_index_bin', 'test_key')
mr_results = []
def mapper(manager, link):
self.assertEqual(manager, self.manager)
mr_results.append(link)
dummy = self.mkdummy(get_link_key(link))
return manager.load(DummyModel, dummy.key)
results = yield self.manager.run_map_reduce(mr, mapper)
results.sort(key=lambda d: d.key)
expected_keys = [str(i) for i in range(4)]
self.assertEqual([d.key for d in results], expected_keys)
mr_results.sort(key=get_link_key)
self.assertEqual([get_link_key(l) for l in mr_results], expected_keys)
@Manager.calls_manager
def test_run_riak_map_reduce_with_timeout(self):
dummies = [self.mkdummy(str(i), {"a": i}) for i in range(4)]
for dummy in dummies:
dummy.add_index('test_index_bin', 'test_key')
yield self.manager.store(dummy)
# override mapreduce_timeout for testing
self.manager.mapreduce_timeout = 1 # millisecond
mr = self.manager.riak_map_reduce()
mr.index('test.dummy_model', 'test_index_bin', 'test_key')
try:
yield self.manager.run_map_reduce(mr, lambda m, l: None)
except Exception, err:
msg = unrepr_string(str(err))
self.assertTrue(msg.startswith(
"Error running MapReduce operation."))
self.assertTrue(msg.endswith(
"Body: '{\"error\":\"timeout\"}'"))
else:
self.fail("Map reduce operation did not timeout")
@Manager.calls_manager
def test_purge_all(self):
dummy = self.mkdummy("foo", {"baz": 0})
yield self.manager.store(dummy)
yield self.manager.purge_all()
result = yield self.manager.load(DummyModel, dummy.key)
self.assertEqual(result, None)
@Manager.calls_manager
def test_purge_all_clears_bucket_properties(self):
search_enabled = yield self.manager.riak_search_enabled(DummyModel)
self.assertEqual(search_enabled, False)
yield self.manager.riak_enable_search(DummyModel)
search_enabled = yield self.manager.riak_search_enabled(DummyModel)
self.assertEqual(search_enabled, True)
# We need at least one key in here so the bucket can be found and
# purged.
dummy = self.mkdummy("foo", {"baz": 0})
yield self.manager.store(dummy)
yield self.manager.purge_all()
search_enabled = yield self.manager.riak_search_enabled(DummyModel)
self.assertEqual(search_enabled, False)
@Manager.calls_manager
def test_json_decoding(self):
# Some versions of the riak client library use simplejson by
# preference, which breaks some of our unicode assumptions. This test
# only fails when such a version is being used and our workaround
# fails. If we're using a good version of the client library, the test
# will pass even if the workaround fails.
dummy1 = self.mkdummy("foo", {"a": "b"})
result1 = yield self.manager.store(dummy1)
self.assertTrue(isinstance(result1.get_data()["a"], unicode))
dummy2 = yield self.manager.load(DummyModel, "foo")
self.assertEqual(dummy2.get_data(), {"a": "b"})
self.assertTrue(isinstance(dummy2.get_data()["a"], unicode))
@Manager.calls_manager
def test_json_decoding_index_keys(self):
# Some versions of the riak client library use simplejson by
# preference, which breaks some of our unicode assumptions. This test
# only fails when such a version is being used and our workaround
# fails. If we're using a good version of the client library, the test
# will pass even if the workaround fails.
class MyDummy(DummyModel):
# Use a fresh bucket name here so we don't get leftover keys.
bucket = 'decoding_index_dummy'
dummy1 = self.mkdummy("foo", {"a": "b"}, dummy_class=MyDummy)
yield self.manager.store(dummy1)
[key] = yield self.manager.index_keys(
MyDummy, '$bucket', self.manager.bucket_name(MyDummy), None)
self.assertEqual(key, u"foo")
self.assertTrue(isinstance(key, unicode))
class TestTxRiakManager(CommonRiakManagerTests, VumiTestCase):
@inlineCallbacks
def setUp(self):
try:
from vumi.persist.txriak_manager import TxRiakManager
except ImportError, e:
import_skip(e, 'riak', 'riak')
self.manager = TxRiakManager.from_config({'bucket_prefix': 'test.'})
self.add_cleanup(self.manager.purge_all)
yield self.manager.purge_all()
def test_call_decorator(self):
self.assertEqual(type(self.manager).call_decorator, inlineCallbacks)
def test_transport_class_protocol_buffer(self):
manager_class = type(self.manager)
manager = manager_class.from_config({
'transport_type': 'pbc',
'bucket_prefix': 'test.',
})
self.assertEqual(manager.client.protocol, 'pbc')
def test_transport_class_http(self):
manager_class = type(self.manager)
manager = manager_class.from_config({
'transport_type': 'http',
'bucket_prefix': 'test.',
})
self.assertEqual(manager.client.protocol, 'http')
def test_transport_class_default(self):
manager_class = type(self.manager)
manager = manager_class.from_config({
'bucket_prefix': 'test.',
})
self.assertEqual(manager.client.protocol, 'http')
|
|
"""
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
import warnings
from contextlib import nullcontext
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core._asarray import asanyarray
from numpy.core import numerictypes as nt
from numpy.core import _exceptions
from numpy._globals import _NoValue
from numpy.compat import pickle, os_fspath
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# Complex types to -> (2,)float view for fast-path computation in _var()
_complex_to_float = {
nt.dtype(nt.csingle) : nt.dtype(nt.single),
nt.dtype(nt.cdouble) : nt.dtype(nt.double),
}
# Special case for windows: ensure double takes precedence
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
_complex_to_float.update({
nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
})
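# Illustrative sketch, not part of NumPy: _var() below uses this mapping to
# view a complex array as pairs of floats so that |x|**2 can be accumulated as
# re*re + im*im without a complex temporary. Roughly:
#
#   x = mu.array([1+2j, 3-4j])                               # complex128
#   xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))    # float64, shape (2, 2)
#   um.multiply(xv, xv, out=xv)                              # square re and im in place
#   um.add(xv[..., 0], xv[..., 1])                           # -> array([ 5., 25.])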
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_maximum(a, axis, None, out, keepdims, initial, where)
def _amin(a, axis=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_minimum(a, axis, None, out, keepdims, initial, where)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_sum(a, axis, dtype, out, keepdims, initial, where)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_prod(a, axis, dtype, out, keepdims, initial, where)
def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Parsing keyword arguments is currently fairly slow, so avoid it for now
if where is True:
return umr_any(a, axis, dtype, out, keepdims)
return umr_any(a, axis, dtype, out, keepdims, where=where)
def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Parsing keyword arguments is currently fairly slow, so avoid it for now
if where is True:
return umr_all(a, axis, dtype, out, keepdims)
return umr_all(a, axis, dtype, out, keepdims, where=where)
def _count_reduce_items(arr, axis, keepdims=False, where=True):
# fast-path for the default case
if where is True:
# no boolean mask given, calculate items according to axis
if axis is None:
axis = tuple(range(arr.ndim))
elif not isinstance(axis, tuple):
axis = (axis,)
items = nt.intp(1)
for ax in axis:
items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
else:
# TODO: Optimize case when `where` is broadcast along a non-reduction
# axis and full sum is more excessive than needed.
# guarded to protect circular imports
from numpy.lib.stride_tricks import broadcast_to
# count True values in (potentially broadcasted) boolean mask
items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
keepdims)
return items
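# Illustrative examples, not part of NumPy: for arr of shape (3, 4),
# _count_reduce_items(arr, None) returns 12 and _count_reduce_items(arr, 0)
# returns 3 (the number of items reduced per output element); with a boolean
# `where` mask it instead counts the True entries taking part in the reduction.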
# Numpy 1.17.0, 2019-02-24
# Various clip behavior deprecations, marked with _clip_dep as a prefix.
def _clip_dep_is_scalar_nan(a):
# guarded to protect circular imports
from numpy.core.fromnumeric import ndim
if ndim(a) != 0:
return False
try:
return um.isnan(a)
except TypeError:
return False
def _clip_dep_is_byte_swapped(a):
if isinstance(a, mu.ndarray):
return not a.dtype.isnative
return False
def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
# normal path
if casting is not None:
return ufunc(*args, out=out, casting=casting, **kwargs)
# try to deal with broken casting rules
try:
return ufunc(*args, out=out, **kwargs)
except _exceptions._UFuncOutputCastingError as e:
# Numpy 1.17.0, 2019-02-24
warnings.warn(
"Converting the output of clip from {!r} to {!r} is deprecated. "
"Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
"correct the type of the variables.".format(e.from_, e.to),
DeprecationWarning,
stacklevel=2
)
return ufunc(*args, out=out, casting="unsafe", **kwargs)
def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
if min is None and max is None:
raise ValueError("One of max or min must be given")
# Numpy 1.17.0, 2019-02-24
# This deprecation probably incurs a substantial slowdown for small arrays,
# it will be good to get rid of it.
if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
using_deprecated_nan = False
if _clip_dep_is_scalar_nan(min):
min = -float('inf')
using_deprecated_nan = True
if _clip_dep_is_scalar_nan(max):
max = float('inf')
using_deprecated_nan = True
if using_deprecated_nan:
warnings.warn(
"Passing `np.nan` to mean no clipping in np.clip has always "
"been unreliable, and is now deprecated. "
"In future, this will always return nan, like it already does "
"when min or max are arrays that contain nan. "
"To skip a bound, pass either None or an np.inf of an "
"appropriate sign.",
DeprecationWarning,
stacklevel=2
)
if min is None:
return _clip_dep_invoke_with_casting(
um.minimum, a, max, out=out, casting=casting, **kwargs)
elif max is None:
return _clip_dep_invoke_with_casting(
um.maximum, a, min, out=out, casting=casting, **kwargs)
else:
return _clip_dep_invoke_with_casting(
um.clip, a, min, max, out=out, casting=casting, **kwargs)
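# Illustrative note, not part of NumPy: _clip() dispatches on which bounds are
# given, e.g. (assuming a is an ndarray)
#   a.clip(min=0)   -> um.minimum is skipped, routed to um.maximum(a, 0, ...)
#   a.clip(max=1)   -> routed to um.minimum(a, 1, ...)
#   a.clip(0, 1)    -> routed to um.clip(a, 0, 1, ...)
# and it raises ValueError when neither bound is supplied.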
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
arr = asanyarray(a)
is_float16_result = False
rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
if rcount == 0 if where is True else umr_any(rcount == 0):
warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None:
if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
elif issubclass(arr.dtype.type, nt.float16):
dtype = mu.dtype('f4')
is_float16_result = True
ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
if is_float16_result and out is None:
ret = arr.dtype.type(ret)
elif hasattr(ret, 'dtype'):
if is_float16_result:
ret = arr.dtype.type(ret / rcount)
else:
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
where=True):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
# Make this warning show up on top.
if ddof >= rcount if where is True else umr_any(ddof >= rcount):
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
stacklevel=2)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
# Compute the mean.
# Note that if dtype is not of inexact type then arraymean will
# not be either.
arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
# The shape of rcount has to match arrmean to not change the shape of out
# in broadcasting. Otherwise, it cannot be stored back to arrmean.
if rcount.ndim == 0:
# fast-path for default case when where is True
div = rcount
else:
# matching rcount to arrmean when where is specified as array
div = rcount.reshape(arrmean.shape)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(arrmean, div, out=arrmean, casting='unsafe',
subok=False)
else:
arrmean = arrmean.dtype.type(arrmean / rcount)
# Compute sum of squared deviations from mean
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
x = um.multiply(x, x, out=x)
# Fast-paths for built-in complex types
elif x.dtype in _complex_to_float:
xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
um.multiply(xv, xv, out=xv)
x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
# Most general case; includes handling object arrays containing imaginary
# numbers and complex types with non-native byteorder
else:
x = um.multiply(x, um.conjugate(x), out=x).real
ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)
# Compute degrees of freedom and make sure it is not negative.
rcount = um.maximum(rcount - ddof, 0)
# divide by degrees of freedom
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
where=True):
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims, where=where)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(um.sqrt(ret))
else:
ret = um.sqrt(ret)
return ret
def _ptp(a, axis=None, out=None, keepdims=False):
return um.subtract(
umr_maximum(a, axis, None, out, keepdims),
umr_minimum(a, axis, None, None, keepdims),
out
)
def _dump(self, file, protocol=2):
if hasattr(file, 'write'):
ctx = nullcontext(file)
else:
ctx = open(os_fspath(file), "wb")
with ctx as f:
pickle.dump(self, f, protocol=protocol)
def _dumps(self, protocol=2):
return pickle.dumps(self, protocol=protocol)
|
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
name: gcp_compute
plugin_type: inventory
short_description: Google Cloud Compute Engine inventory source
requirements:
- requests >= 2.18.4
- google-auth >= 1.3.0
extends_documentation_fragment:
- constructed
- inventory_cache
description:
- Get inventory hosts from Google Cloud Platform GCE.
- Uses a YAML configuration file that ends with gcp_compute.(yml|yaml) or gcp.(yml|yaml).
options:
plugin:
description: token that ensures this is a source file for the 'gcp_compute' plugin.
required: True
choices: ['gcp_compute']
zones:
        description: A list of zones in which to describe GCE instances.
If none provided, it defaults to all zones available to a given project.
type: list
projects:
description: A list of projects in which to describe GCE instances.
type: list
required: True
filters:
description: >
A list of filter value pairs. Available filters are listed here
U(https://cloud.google.com/compute/docs/reference/rest/v1/instances/aggregatedList).
        Each additional filter in the list will be added as an AND condition
(filter1 and filter2)
type: list
hostnames:
description: A list of options that describe the ordering for which
hostnames should be assigned. Currently supported hostnames are
'public_ip', 'private_ip', or 'name'.
default: ['public_ip', 'private_ip', 'name']
type: list
auth_kind:
description:
- The type of credential used.
required: True
choices: ['application', 'serviceaccount', 'machineaccount']
env:
- name: GCP_AUTH_KIND
version_added: "2.8.2"
scopes:
description: list of authentication scopes
type: list
default: ['https://www.googleapis.com/auth/compute']
env:
- name: GCP_SCOPES
version_added: "2.8.2"
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
env:
- name: GCP_SERVICE_ACCOUNT_FILE
version_added: "2.8.2"
- name: GCE_CREDENTIALS_FILE_PATH
version_added: "2.8"
service_account_contents:
description:
- A string representing the contents of a Service Account JSON file. This should not be passed in as a dictionary,
but a string that has the exact contents of a service account json file (valid JSON).
type: string
env:
- name: GCP_SERVICE_ACCOUNT_CONTENTS
version_added: "2.8.2"
service_account_email:
description:
- An optional service account email address if machineaccount is selected
and the user does not wish to use the default email.
env:
- name: GCP_SERVICE_ACCOUNT_EMAIL
version_added: "2.8.2"
vars_prefix:
description: prefix to apply to host variables, does not include facts nor params
default: ''
use_contrib_script_compatible_sanitization:
description:
- By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
This option allows you to override that, in efforts to allow migration from the old inventory script.
- For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
otherwise the core engine will just use the standard sanitization on top.
- This is not the default as such names break certain functionality as not all characters are valid Python identifiers
which group names end up being used as.
type: bool
default: False
version_added: '2.8'
retrieve_image_info:
description:
- Populate the C(image) host fact for the instances returned with the GCP image name
- By default this plugin does not attempt to resolve the boot image of an instance to the image name cataloged in GCP
because of the performance overhead of the task.
- Unless this option is enabled, the C(image) host variable will be C(null)
type: bool
default: False
version_added: '2.8'
"""
EXAMPLES = """
plugin: gcp_compute
zones: # populate inventory with instances in these zones
- us-east1-a
projects:
- gcp-prod-gke-100
- gcp-cicd-101
filters:
- machineType = n1-standard-1
- scheduling.automaticRestart = true AND machineType = n1-standard-1
service_account_file: /tmp/service_account.json
auth_kind: serviceaccount
scopes:
- 'https://www.googleapis.com/auth/cloud-platform'
- 'https://www.googleapis.com/auth/compute.readonly'
keyed_groups:
# Create groups from GCE labels
- prefix: gcp
key: labels
hostnames:
# List host by name instead of the default public ip
- name
compose:
# Set an inventory parameter to use the Public IP address to connect to the host
# For Private ip use "networkInterfaces[0].networkIP"
ansible_host: networkInterfaces[0].accessConfigs[0].natIP
"""
import json
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.gcp_utils import (
GcpSession,
navigate_hash,
GcpRequestException,
HAS_GOOGLE_LIBRARIES,
)
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
# Mocking a module to reuse module_utils
class GcpMockModule(object):
def __init__(self, params):
self.params = params
def fail_json(self, *args, **kwargs):
raise AnsibleError(kwargs["msg"])
class GcpInstance(object):
def __init__(self, json, hostname_ordering, project_disks, should_format=True):
self.hostname_ordering = hostname_ordering
self.project_disks = project_disks
self.json = json
if should_format:
self.convert()
def to_json(self):
return self.json
def convert(self):
if "zone" in self.json:
self.json["zone_selflink"] = self.json["zone"]
self.json["zone"] = self.json["zone"].split("/")[-1]
if "machineType" in self.json:
self.json["machineType_selflink"] = self.json["machineType"]
self.json["machineType"] = self.json["machineType"].split("/")[-1]
if "networkInterfaces" in self.json:
for network in self.json["networkInterfaces"]:
if "network" in network:
network["network"] = self._format_network_info(network["network"])
if "subnetwork" in network:
network["subnetwork"] = self._format_network_info(
network["subnetwork"]
)
if "metadata" in self.json:
# If no metadata, 'items' will be blank.
            # We want the metadata hash overridden anyway for consistency.
self.json["metadata"] = self._format_metadata(
self.json["metadata"].get("items", {})
)
self.json["project"] = self.json["selfLink"].split("/")[6]
self.json["image"] = self._get_image()
def _format_network_info(self, address):
"""
:param address: A GCP network address
:return a dict with network shortname and region
"""
split = address.split("/")
region = ""
if "global" in split:
region = "global"
else:
region = split[8]
return {"region": region, "name": split[-1], "selfLink": address}
def _format_metadata(self, metadata):
"""
:param metadata: A list of dicts where each dict has keys "key" and "value"
:return a dict with key/value pairs for each in list.
"""
new_metadata = {}
for pair in metadata:
new_metadata[pair["key"]] = pair["value"]
return new_metadata
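    # Illustrative example, not part of the original plugin: _format_metadata()
    # flattens the GCE metadata item list, e.g.
    #   [{"key": "sshKeys", "value": "..."}, {"key": "startup-script", "value": "..."}]
    # becomes
    #   {"sshKeys": "...", "startup-script": "..."}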
def hostname(self):
"""
:return the hostname of this instance
"""
for order in self.hostname_ordering:
name = None
if order == "public_ip":
name = self._get_publicip()
elif order == "private_ip":
name = self._get_privateip()
elif order == "name":
name = self.json[u"name"]
else:
raise AnsibleParserError("%s is not a valid hostname precedent" % order)
if name:
return name
raise AnsibleParserError("No valid name found for host")
def _get_publicip(self):
"""
:return the publicIP of this instance or None
"""
# Get public IP if exists
for interface in self.json["networkInterfaces"]:
if "accessConfigs" in interface:
for accessConfig in interface["accessConfigs"]:
if "natIP" in accessConfig:
return accessConfig[u"natIP"]
return None
def _get_image(self):
"""
:param instance: A instance response from GCP
:return the image of this instance or None
"""
image = None
if self.project_disks and "disks" in self.json:
for disk in self.json["disks"]:
if disk.get("boot"):
image = self.project_disks[disk["source"]]
return image
def _get_privateip(self):
"""
:param item: A host response from GCP
:return the privateIP of this instance or None
"""
# Fallback: Get private IP
for interface in self.json[u"networkInterfaces"]:
if "networkIP" in interface:
return interface[u"networkIP"]
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = "gcp_compute"
_instances = (
r"https://www.googleapis.com/compute/v1/projects/%s/aggregated/instances"
)
def __init__(self):
super(InventoryModule, self).__init__()
self.group_prefix = "gcp_"
def _populate_host(self, item):
"""
:param item: A GCP instance
"""
hostname = item.hostname()
self.inventory.add_host(hostname)
for key in item.to_json():
try:
self.inventory.set_variable(
hostname, self.get_option("vars_prefix") + key, item.to_json()[key]
)
except (ValueError, TypeError) as e:
self.display.warning(
"Could not set host info hostvar for %s, skipping %s: %s"
% (hostname, key, to_text(e))
)
self.inventory.add_child("all", hostname)
def verify_file(self, path):
"""
:param path: the path to the inventory config file
:return the contents of the config file
"""
if super(InventoryModule, self).verify_file(path):
if path.endswith(("gcp.yml", "gcp.yaml")):
return True
elif path.endswith(("gcp_compute.yml", "gcp_compute.yaml")):
return True
return False
def fetch_list(self, params, link, query):
"""
:param params: a dict containing all of the fields relevant to build URL
:param link: a formatted URL
:param query: a formatted query string
:return the JSON response containing a list of instances.
"""
lists = []
resp = self._return_if_object(
self.fake_module, self.auth_session.get(link, params={"filter": query})
)
lists.append(resp.get("items"))
while resp.get("nextPageToken"):
resp = self._return_if_object(
self.fake_module,
self.auth_session.get(
link,
params={"filter": query, "pageToken": resp.get("nextPageToken")},
),
)
lists.append(resp.get("items"))
return self.build_list(lists)
def build_list(self, lists):
arrays_for_zones = {}
for resp in lists:
for zone in resp:
if "instances" in resp[zone]:
if zone in arrays_for_zones:
arrays_for_zones[zone] = (
arrays_for_zones[zone] + resp[zone]["instances"]
)
else:
arrays_for_zones[zone] = resp[zone]["instances"]
return arrays_for_zones
def _get_query_options(self, filters):
"""
:param config_data: contents of the inventory config file
:return A fully built query string
"""
if not filters:
return ""
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != "(" and f[-1] != ")":
queries.append("(%s)" % "".join(f))
else:
queries.append(f)
return " ".join(queries)
def _return_if_object(self, module, response):
"""
:param module: A GcpModule
:param response: A Requests response object
:return JSON response
"""
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
            response.raise_for_status()
result = response.json()
except getattr(json.decoder, "JSONDecodeError", ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
except GcpRequestException as inst:
module.fail_json(msg="Network error: %s" % inst)
if navigate_hash(result, ["error", "errors"]):
module.fail_json(msg=navigate_hash(result, ["error", "errors"]))
return result
def _add_hosts(self, items, config_data, format_items=True, project_disks=None):
"""
:param items: A list of hosts
:param config_data: configuration data
:param format_items: format items or not
"""
if not items:
return
hostname_ordering = ["public_ip", "private_ip", "name"]
if self.get_option("hostnames"):
hostname_ordering = self.get_option("hostnames")
for host_json in items:
host = GcpInstance(
host_json, hostname_ordering, project_disks, format_items
)
self._populate_host(host)
hostname = host.hostname()
self._set_composite_vars(
self.get_option("compose"), host.to_json(), hostname
)
self._add_host_to_composed_groups(
self.get_option("groups"), host.to_json(), hostname
)
self._add_host_to_keyed_groups(
self.get_option("keyed_groups"), host.to_json(), hostname
)
def _get_project_disks(self, config_data, query):
"""
project space disk images
"""
try:
self._project_disks
except AttributeError:
self._project_disks = {}
request_params = {"maxResults": 500, "filter": query}
for project in config_data["projects"]:
session_responses = []
page_token = True
while page_token:
response = self.auth_session.get(
"https://www.googleapis.com/compute/v1/projects/{0}/aggregated/disks".format(
project
),
params=request_params,
)
response_json = response.json()
if "nextPageToken" in response_json:
request_params["pageToken"] = response_json["nextPageToken"]
elif "pageToken" in request_params:
del request_params["pageToken"]
if "items" in response_json:
session_responses.append(response_json)
page_token = "pageToken" in request_params
for response in session_responses:
if "items" in response:
# example k would be a zone or region name
# example v would be { "disks" : [], "otherkey" : "..." }
for zone_or_region, aggregate in response["items"].items():
if "zones" in zone_or_region:
if "disks" in aggregate:
zone = zone_or_region.replace("zones/", "")
for disk in aggregate["disks"]:
if (
"zones" in config_data
and zone in config_data["zones"]
):
# If zones specified, only store those zones' data
if "sourceImage" in disk:
self._project_disks[
disk["selfLink"]
] = disk["sourceImage"].split("/")[-1]
else:
self._project_disks[
disk["selfLink"]
] = disk["selfLink"].split("/")[-1]
else:
if "sourceImage" in disk:
self._project_disks[
disk["selfLink"]
] = disk["sourceImage"].split("/")[-1]
else:
self._project_disks[
disk["selfLink"]
] = disk["selfLink"].split("/")[-1]
return self._project_disks
def parse(self, inventory, loader, path, cache=True):
if not HAS_GOOGLE_LIBRARIES:
raise AnsibleParserError(
"gce inventory plugin cannot start: %s"
% missing_required_lib("google-auth")
)
super(InventoryModule, self).parse(inventory, loader, path)
config_data = {}
config_data = self._read_config_data(path)
if self.get_option("use_contrib_script_compatible_sanitization"):
self._sanitize_group_name = (
self._legacy_script_compatible_group_sanitization
)
# setup parameters as expected by 'fake module class' to reuse module_utils w/o changing the API
params = {
"filters": self.get_option("filters"),
"projects": self.get_option("projects"),
"scopes": self.get_option("scopes"),
"zones": self.get_option("zones"),
"auth_kind": self.get_option("auth_kind"),
"service_account_file": self.get_option("service_account_file"),
"service_account_contents": self.get_option("service_account_contents"),
"service_account_email": self.get_option("service_account_email"),
}
self.fake_module = GcpMockModule(params)
self.auth_session = GcpSession(self.fake_module, "compute")
query = self._get_query_options(params["filters"])
if self.get_option("retrieve_image_info"):
project_disks = self._get_project_disks(config_data, query)
else:
project_disks = None
# Cache logic
if cache:
cache = self.get_option("cache")
cache_key = self.get_cache_key(path)
else:
cache_key = None
cache_needs_update = False
if cache:
try:
results = self._cache[cache_key]
for project in results:
for zone in results[project]:
self._add_hosts(
results[project][zone],
config_data,
False,
project_disks=project_disks,
)
except KeyError:
cache_needs_update = True
if not cache or cache_needs_update:
cached_data = {}
for project in params["projects"]:
cached_data[project] = {}
params["project"] = project
zones = params["zones"]
# Fetch all instances
link = self._instances % project
resp = self.fetch_list(params, link, query)
for key, value in resp.items():
zone = key[6:]
if not zones or zone in zones:
self._add_hosts(value, config_data, project_disks=project_disks)
cached_data[project][zone] = value
if cache_needs_update:
self._cache[cache_key] = cached_data
@staticmethod
def _legacy_script_compatible_group_sanitization(name):
return name
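# Illustrative inventory configuration for this plugin (a hedged sketch, not
# part of the original source; values are hypothetical and the option names
# simply mirror the get_option() calls above):
#
#   plugin: <this plugin's fully qualified name>
#   projects:
#     - my-gcp-project
#   zones:
#     - us-central1-a
#   filters:
#     - status = RUNNING
#   auth_kind: serviceaccount
#   service_account_file: /path/to/service-account-key.json
#   hostnames:
#     - name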
|
|
import random
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
import warnings
import json
from django.test.utils import override_settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.test.client import RequestFactory
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth.models import AnonymousUser
from ..tests import MockedResponse, mocked_response
from ..account import app_settings as account_settings
from ..account.models import EmailAddress
from ..account.utils import user_email
from ..utils import get_user_model
from .models import SocialApp, SocialAccount, SocialLogin
from .helpers import complete_social_login
def create_oauth_tests(provider):
def get_mocked_response(self):
pass
def setUp(self):
app = SocialApp.objects.create(provider=provider.id,
name=provider.id,
client_id='app123id',
key=provider.id,
secret='dummy')
app.sites.add(Site.objects.get_current())
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=False)
def test_login(self):
resp_mocks = self.get_mocked_response()
if resp_mocks is None:
warnings.warn("Cannot test provider %s, no oauth mock"
% self.provider.id)
return
resp = self.login(resp_mocks)
self.assertRedirects(resp, reverse('socialaccount_signup'))
resp = self.client.get(reverse('socialaccount_signup'))
sociallogin = resp.context['form'].sociallogin
data = dict(email=user_email(sociallogin.user),
username=str(random.randrange(1000, 10000000)))
resp = self.client.post(reverse('socialaccount_signup'),
data=data)
self.assertEqual('http://testserver/accounts/profile/',
resp['location'])
user = resp.context['user']
self.assertFalse(user.has_usable_password())
return SocialAccount.objects.get(user=user,
provider=self.provider.id)
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=True,
SOCIALACCOUNT_EMAIL_REQUIRED=False,
ACCOUNT_EMAIL_REQUIRED=False)
def test_auto_signup(self):
resp_mocks = self.get_mocked_response()
if not resp_mocks:
warnings.warn("Cannot test provider %s, no oauth mock"
% self.provider.id)
return
resp = self.login(resp_mocks)
self.assertEqual('http://testserver/accounts/profile/',
resp['location'])
self.assertFalse(resp.context['user'].has_usable_password())
def login(self, resp_mocks, process='login'):
with mocked_response(MockedResponse(200,
'oauth_token=token&'
'oauth_token_secret=psst',
{'content-type':
'text/html'})):
resp = self.client.get(reverse(self.provider.id + '_login'),
dict(process=process))
p = urlparse(resp['location'])
q = parse_qs(p.query)
complete_url = reverse(self.provider.id+'_callback')
self.assertGreater(q['oauth_callback'][0]
.find(complete_url), 0)
with mocked_response(self.get_access_token_response(),
*resp_mocks):
resp = self.client.get(complete_url)
return resp
def get_access_token_response(self):
return MockedResponse(
200,
'oauth_token=token&oauth_token_secret=psst',
{'content-type': 'text/html'})
impl = {'setUp': setUp,
'login': login,
'test_login': test_login,
'get_mocked_response': get_mocked_response,
'get_access_token_response': get_access_token_response}
    class_name = 'OAuthTests_' + provider.id
Class = type(class_name, (TestCase,), impl)
Class.provider = provider
return Class
def create_oauth2_tests(provider):
def get_mocked_response(self):
pass
def get_login_response_json(self, with_refresh_token=True):
rt = ''
if with_refresh_token:
rt = ',"refresh_token": "testrf"'
return """{
"uid":"weibo",
"access_token":"testac"
%s }""" % rt
def setUp(self):
app = SocialApp.objects.create(provider=provider.id,
name=provider.id,
client_id='app123id',
key=provider.id,
secret='dummy')
app.sites.add(Site.objects.get_current())
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=False)
def test_login(self):
resp_mock = self.get_mocked_response()
if not resp_mock:
warnings.warn("Cannot test provider %s, no oauth mock"
% self.provider.id)
return
resp = self.login(resp_mock,)
self.assertRedirects(resp, reverse('socialaccount_signup'))
def test_account_tokens(self, multiple_login=False):
email = 'some@mail.com'
user = get_user_model().objects.create(
username='user',
is_active=True,
email=email)
user.set_password('test')
user.save()
EmailAddress.objects.create(user=user,
email=email,
primary=True,
verified=True)
self.client.login(username=user.username,
password='test')
self.login(self.get_mocked_response(), process='connect')
if multiple_login:
self.login(
self.get_mocked_response(),
with_refresh_token=False,
process='connect')
# get account
sa = SocialAccount.objects.filter(user=user,
provider=self.provider.id).get()
# get token
t = sa.socialtoken_set.get()
# verify access_token and refresh_token
self.assertEqual('testac', t.token)
self.assertEqual(t.token_secret,
json.loads(self.get_login_response_json(
with_refresh_token=True)).get(
'refresh_token', ''))
def test_account_refresh_token_saved_next_login(self):
        '''
        Fails if a login that is missing a refresh token deletes the
        previously saved refresh token. Systems such as Google's OAuth
        only send a refresh token on the first login.
        '''
self.test_account_tokens(multiple_login=True)
def login(self, resp_mock, process='login',
with_refresh_token=True):
resp = self.client.get(reverse(self.provider.id + '_login'),
dict(process=process))
p = urlparse(resp['location'])
q = parse_qs(p.query)
complete_url = reverse(self.provider.id+'_callback')
self.assertGreater(q['redirect_uri'][0]
.find(complete_url), 0)
response_json = self \
.get_login_response_json(with_refresh_token=with_refresh_token)
with mocked_response(
MockedResponse(
200,
response_json,
{'content-type': 'application/json'}),
resp_mock):
resp = self.client.get(complete_url,
{'code': 'test',
'state': q['state'][0]})
return resp
impl = {'setUp': setUp,
'login': login,
'test_login': test_login,
'test_account_tokens': test_account_tokens,
'test_account_refresh_token_saved_next_login':
test_account_refresh_token_saved_next_login,
'get_login_response_json': get_login_response_json,
'get_mocked_response': get_mocked_response}
class_name = 'OAuth2Tests_'+provider.id
Class = type(class_name, (TestCase,), impl)
Class.provider = provider
return Class
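# Illustrative usage sketch (not part of the original source; `someprovider`
# is a hypothetical provider instance): a provider's own test module is
# expected to call one of the factories above and bind the generated class at
# module level so the Django test runner can discover it, e.g.
#
#   OAuth2Tests_SomeProvider = create_oauth2_tests(someprovider)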
class SocialAccountTests(TestCase):
@override_settings(
SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
ACCOUNT_EMAIL_VERIFICATION=account_settings.EmailVerificationMethod.NONE # noqa
)
def test_email_address_created(self):
factory = RequestFactory()
request = factory.get('/accounts/login/callback/')
request.user = AnonymousUser()
SessionMiddleware().process_request(request)
MessageMiddleware().process_request(request)
User = get_user_model()
user = User()
setattr(user, account_settings.USER_MODEL_USERNAME_FIELD, 'test')
setattr(user, account_settings.USER_MODEL_EMAIL_FIELD, 'test@test.com')
account = SocialAccount(provider='openid', uid='123')
sociallogin = SocialLogin(user=user, account=account)
complete_social_login(request, sociallogin)
user = User.objects.get(
**{account_settings.USER_MODEL_USERNAME_FIELD: 'test'}
)
self.assertTrue(
SocialAccount.objects.filter(user=user, uid=account.uid).exists()
)
self.assertTrue(
EmailAddress.objects.filter(user=user,
email=user_email(user)).exists()
)
|
|
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from numpy import (exp, log, asarray, arange, newaxis, hstack, product, array,
zeros, eye, poly1d, r_, sum, fromstring, isfinite,
squeeze, amax, reshape, sign, broadcast_arrays)
from scipy._lib._version import NumpyVersion
__all__ = ['logsumexp', 'central_diff_weights', 'derivative', 'pade', 'lena',
'ascent', 'face']
_NUMPY_170 = (NumpyVersion(numpy.__version__) >= NumpyVersion('1.7.0'))
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed. Tuple of ints is not accepted if NumPy
version is lower than 1.7.0.
.. versionadded:: 0.11.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
.. versionadded:: 0.15.0
    b : array_like, optional
        Scaling factor for exp(`a`). Must be of the same shape as `a` or
        broadcastable to `a`. These values may be negative in order to
        implement subtraction.
.. versionadded:: 0.12.0
return_sign : bool, optional
If this is set to True, the result will be a pair containing sign
information; if False, results that are negative will be returned
as NaN. Default is False (no sign information).
.. versionadded:: 0.16.0
Returns
-------
res : ndarray
The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
is returned.
sgn : ndarray
If return_sign is True, this will be an array of floating-point
numbers matching res and +1, 0, or -1 depending on the sign
of the result. If False, only one result is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2
Notes
-----
Numpy has a logaddexp function which is very similar to `logsumexp`, but
only handles two arguments. `logaddexp.reduce` is similar to this
function, but may be less stable.
Examples
--------
>>> from scipy.misc import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
With weights
>>> a = np.arange(10)
>>> b = np.arange(10, 0, -1)
>>> logsumexp(a, b=b)
9.9170178533034665
>>> np.log(np.sum(b*np.exp(a)))
9.9170178533034647
Returning a sign flag
>>> logsumexp([1,2],b=[1,-1],return_sign=True)
(1.5413248546129181, -1.0)
"""
a = asarray(a)
if b is not None:
a, b = broadcast_arrays(a,b)
if np.any(b == 0):
a = a + 0. # promote to at least float
a[b == 0] = -np.inf
# keepdims is available in numpy.sum and numpy.amax since NumPy 1.7.0
#
# Because SciPy supports versions earlier than 1.7.0, we have to handle
# those old versions differently
if not _NUMPY_170:
# When support for Numpy < 1.7.0 is dropped, this implementation can be
# removed. This implementation is a bit hacky. Similarly to old NumPy's
# sum and amax functions, 'axis' must be an integer or None, tuples and
# lists are not supported. Although 'keepdims' is not supported by these
# old NumPy's functions, this function supports it.
# Solve the shape of the reduced array
if axis is None:
sh_keepdims = (1,) * a.ndim
else:
sh_keepdims = list(a.shape)
sh_keepdims[axis] = 1
a_max = amax(a, axis=axis)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
tmp = b * exp(a - reshape(a_max, sh_keepdims))
else:
tmp = exp(a - reshape(a_max, sh_keepdims))
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
s = sum(tmp, axis=axis)
if return_sign:
sgn = sign(s)
s *= sgn # /= makes more sense but we need zero -> zero
out = log(s)
out += a_max
if keepdims:
# Put back the reduced axes with size one
out = reshape(out, sh_keepdims)
if return_sign:
sgn = reshape(sgn, sh_keepdims)
else:
# This is a more elegant implementation, requiring NumPy >= 1.7.0
a_max = amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
b = asarray(b)
tmp = b * exp(a - a_max)
else:
tmp = exp(a - a_max)
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
s = sum(tmp, axis=axis, keepdims=keepdims)
if return_sign:
sgn = sign(s)
s *= sgn # /= makes more sense but we need zero -> zero
out = log(s)
if not keepdims:
a_max = squeeze(a_max, axis=axis)
out += a_max
if return_sign:
return out, sgn
else:
return out
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
    the derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
    Can be inaccurate for a large number of points.
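    Examples
    --------
    An illustrative doctest-style sketch (not part of the original docstring):
    >>> w = central_diff_weights(3)  # 3-point stencil, first derivative
    >>> # w is approximately [-0.5, 0.0, 0.5], i.e. the familiar
    >>> # f'(x) ~ (f(x + dx) - f(x - dx)) / (2 * dx) rule once divided by dx.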
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
        Arguments to pass to `func`.
order : int, optional
Number of points to use, must be odd.
Notes
-----
    Decreasing the step size too far can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomials.
Returns
-------
p, q : Polynomial class
The pade approximation of the polynomial defined by `an` is
`p(x)/q(x)`.
Examples
--------
>>> from scipy import misc
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = misc.pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
def lena():
"""
Function that previously returned an example image
.. note:: Removed in 0.17
Parameters
----------
None
Returns
-------
None
Raises
------
RuntimeError
This functionality has been removed due to licensing reasons.
Notes
-----
The image previously returned by this function has an incompatible license
and has been removed from SciPy. Please use `face` or `ascent` instead.
See Also
--------
face, ascent
"""
raise RuntimeError('lena() is no longer included in SciPy, please use '
'ascent() or face() instead')
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
    gray : bool, optional
        If True return an 8-bit grayscale image, otherwise return a color image
Returns
-------
face : ndarray
        image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
|
|
"""Support for Tuya number."""
from __future__ import annotations
from tuya_iot import TuyaDevice, TuyaDeviceManager
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import HomeAssistantTuyaData
from .base import IntegerTypeData, TuyaEntity
from .const import DOMAIN, TUYA_DISCOVERY_NEW, DPCode, DPType
# All descriptions can be found here. Mostly the Integer data types in the
# default instruction set of each category end up being a number.
# https://developer.tuya.com/en/docs/iot/standarddescription?id=K9i5ql6waswzq
NUMBERS: dict[str, tuple[NumberEntityDescription, ...]] = {
# Smart Kettle
# https://developer.tuya.com/en/docs/iot/fbh?id=K9gf484m21yq7
"bh": (
NumberEntityDescription(
key=DPCode.TEMP_SET,
name="Temperature",
icon="mdi:thermometer",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.TEMP_SET_F,
name="Temperature",
icon="mdi:thermometer",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.TEMP_BOILING_C,
name="Temperature After Boiling",
icon="mdi:thermometer",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.TEMP_BOILING_F,
name="Temperature After Boiling",
icon="mdi:thermometer",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.WARM_TIME,
name="Heat Preservation Time",
icon="mdi:timer",
entity_category=EntityCategory.CONFIG,
),
),
# Smart Pet Feeder
# https://developer.tuya.com/en/docs/iot/categorycwwsq?id=Kaiuz2b6vydld
"cwwsq": (
NumberEntityDescription(
key=DPCode.MANUAL_FEED,
name="Feed",
icon="mdi:bowl",
),
NumberEntityDescription(
key=DPCode.VOICE_TIMES,
name="Voice Times",
icon="mdi:microphone",
),
),
# Human Presence Sensor
# https://developer.tuya.com/en/docs/iot/categoryhps?id=Kaiuz42yhn1hs
"hps": (
NumberEntityDescription(
key=DPCode.SENSITIVITY,
name="Sensitivity",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.NEAR_DETECTION,
name="Near Detection",
icon="mdi:signal-distance-variant",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.FAR_DETECTION,
name="Far Detection",
icon="mdi:signal-distance-variant",
entity_category=EntityCategory.CONFIG,
),
),
# Coffee maker
# https://developer.tuya.com/en/docs/iot/categorykfj?id=Kaiuz2p12pc7f
"kfj": (
NumberEntityDescription(
key=DPCode.WATER_SET,
name="Water Level",
icon="mdi:cup-water",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.TEMP_SET,
name="Temperature",
icon="mdi:thermometer",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.WARM_TIME,
name="Heat Preservation Time",
icon="mdi:timer",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.POWDER_SET,
name="Powder",
entity_category=EntityCategory.CONFIG,
),
),
# Robot Vacuum
# https://developer.tuya.com/en/docs/iot/fsd?id=K9gf487ck1tlo
"sd": (
NumberEntityDescription(
key=DPCode.VOLUME_SET,
name="Volume",
icon="mdi:volume-high",
entity_category=EntityCategory.CONFIG,
),
),
# Siren Alarm
# https://developer.tuya.com/en/docs/iot/categorysgbj?id=Kaiuz37tlpbnu
"sgbj": (
NumberEntityDescription(
key=DPCode.ALARM_TIME,
name="Time",
entity_category=EntityCategory.CONFIG,
),
),
# Smart Camera
# https://developer.tuya.com/en/docs/iot/categorysp?id=Kaiuz35leyo12
"sp": (
NumberEntityDescription(
key=DPCode.BASIC_DEVICE_VOLUME,
name="Volume",
icon="mdi:volume-high",
entity_category=EntityCategory.CONFIG,
),
),
# Dimmer Switch
# https://developer.tuya.com/en/docs/iot/categorytgkg?id=Kaiuz0ktx7m0o
"tgkg": (
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MIN_1,
name="Minimum Brightness",
icon="mdi:lightbulb-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MAX_1,
name="Maximum Brightness",
icon="mdi:lightbulb-on-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MIN_2,
name="Minimum Brightness 2",
icon="mdi:lightbulb-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MAX_2,
name="Maximum Brightness 2",
icon="mdi:lightbulb-on-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MIN_3,
name="Minimum Brightness 3",
icon="mdi:lightbulb-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MAX_3,
name="Maximum Brightness 3",
icon="mdi:lightbulb-on-outline",
entity_category=EntityCategory.CONFIG,
),
),
# Dimmer Switch
# https://developer.tuya.com/en/docs/iot/categorytgkg?id=Kaiuz0ktx7m0o
"tgq": (
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MIN_1,
name="Minimum Brightness",
icon="mdi:lightbulb-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MAX_1,
name="Maximum Brightness",
icon="mdi:lightbulb-on-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MIN_2,
name="Minimum Brightness 2",
icon="mdi:lightbulb-outline",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.BRIGHTNESS_MAX_2,
name="Maximum Brightness 2",
icon="mdi:lightbulb-on-outline",
entity_category=EntityCategory.CONFIG,
),
),
# Vibration Sensor
# https://developer.tuya.com/en/docs/iot/categoryzd?id=Kaiuz3a5vrzno
"zd": (
NumberEntityDescription(
key=DPCode.SENSITIVITY,
name="Sensitivity",
entity_category=EntityCategory.CONFIG,
),
),
# Fingerbot
"szjqr": (
NumberEntityDescription(
key=DPCode.ARM_DOWN_PERCENT,
name="Move Down %",
icon="mdi:arrow-down-bold",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.ARM_UP_PERCENT,
name="Move Up %",
icon="mdi:arrow-up-bold",
entity_category=EntityCategory.CONFIG,
),
NumberEntityDescription(
key=DPCode.CLICK_SUSTAIN_TIME,
name="Down Delay",
icon="mdi:timer",
entity_category=EntityCategory.CONFIG,
),
),
# Fan
# https://developer.tuya.com/en/docs/iot/categoryfs?id=Kaiuz1xweel1c
"fs": (
NumberEntityDescription(
key=DPCode.TEMP,
name="Temperature",
icon="mdi:thermometer-lines",
),
),
# Humidifier
# https://developer.tuya.com/en/docs/iot/categoryjsq?id=Kaiuz1smr440b
"jsq": (
NumberEntityDescription(
key=DPCode.TEMP_SET,
name="Temperature",
icon="mdi:thermometer-lines",
),
NumberEntityDescription(
key=DPCode.TEMP_SET_F,
name="Temperature",
icon="mdi:thermometer-lines",
),
),
}
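# Illustrative sketch (not part of the original mapping): support for another
# device category would be added by appending an entry keyed by its Tuya
# category code, for example a hypothetical "xyz" category reusing an existing
# DPCode:
#
#   "xyz": (
#       NumberEntityDescription(
#           key=DPCode.TEMP_SET,
#           name="Temperature",
#           icon="mdi:thermometer",
#           entity_category=EntityCategory.CONFIG,
#       ),
#   ),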
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Tuya number dynamically through Tuya discovery."""
hass_data: HomeAssistantTuyaData = hass.data[DOMAIN][entry.entry_id]
@callback
def async_discover_device(device_ids: list[str]) -> None:
"""Discover and add a discovered Tuya number."""
entities: list[TuyaNumberEntity] = []
for device_id in device_ids:
device = hass_data.device_manager.device_map[device_id]
if descriptions := NUMBERS.get(device.category):
for description in descriptions:
if description.key in device.status:
entities.append(
TuyaNumberEntity(
device, hass_data.device_manager, description
)
)
async_add_entities(entities)
async_discover_device([*hass_data.device_manager.device_map])
entry.async_on_unload(
async_dispatcher_connect(hass, TUYA_DISCOVERY_NEW, async_discover_device)
)
class TuyaNumberEntity(TuyaEntity, NumberEntity):
"""Tuya Number Entity."""
_number: IntegerTypeData | None = None
def __init__(
self,
device: TuyaDevice,
device_manager: TuyaDeviceManager,
description: NumberEntityDescription,
) -> None:
"""Init Tuya sensor."""
super().__init__(device, device_manager)
self.entity_description = description
self._attr_unique_id = f"{super().unique_id}{description.key}"
if int_type := self.find_dpcode(
description.key, dptype=DPType.INTEGER, prefer_function=True
):
self._number = int_type
self._attr_max_value = self._number.max_scaled
self._attr_min_value = self._number.min_scaled
self._attr_step = self._number.step_scaled
if description.unit_of_measurement is None:
self._attr_unit_of_measurement = self._number.unit
@property
def value(self) -> float | None:
"""Return the entity value to represent the entity state."""
# Unknown or unsupported data type
if self._number is None:
return None
# Raw value
        if (value := self.device.status.get(self.entity_description.key)) is None:
return None
return self._number.scale_value(value)
def set_value(self, value: float) -> None:
"""Set new value."""
if self._number is None:
raise RuntimeError("Cannot set value, device doesn't provide type data")
self._send_command(
[
{
"code": self.entity_description.key,
"value": self._number.scale_value_back(value),
}
]
)
|
|
# stdlib
import collections
import logging
import pprint
import socket
import sys
import time
# project
from checks import AGENT_METRICS_CHECK_NAME, AgentCheck, create_service_check
from checks.check_status import (
CheckStatus,
CollectorStatus,
EmitterStatus,
STATUS_ERROR,
STATUS_OK,
)
from checks.datadog import DdForwarder, Dogstreams
from checks.ganglia import Ganglia
from config import get_system_stats, get_version
from resources.processes import Processes as ResProcesses
import checks.system.unix as u
import checks.system.win32 as w32
import modules
from util import (
EC2,
GCE,
get_os,
get_uuid,
Timer,
)
from utils.debug import log_exceptions
from utils.jmx import JMXFiles
from utils.platform import Platform
from utils.subprocess_output import get_subprocess_output
log = logging.getLogger(__name__)
FLUSH_LOGGING_PERIOD = 10
FLUSH_LOGGING_INITIAL = 5
DD_CHECK_TAG = 'dd_check:{0}'
class AgentPayload(collections.MutableMapping):
"""
AgentPayload offers a single payload interface but manages two payloads:
* A metadata payload
* A data payload that contains metrics, events, service_checks and more
    Each of these payloads is automatically submitted to its specific endpoint.
"""
METADATA_KEYS = frozenset(['meta', 'tags', 'host-tags', 'systemStats',
'agent_checks', 'gohai', 'external_host_tags'])
DUPLICATE_KEYS = frozenset(['apiKey', 'agentVersion'])
COMMON_ENDPOINT = ''
DATA_ENDPOINT = 'metrics'
METADATA_ENDPOINT = 'metadata'
def __init__(self):
self.data_payload = dict()
self.meta_payload = dict()
@property
def payload(self):
"""
Single payload with the content of data and metadata payloads.
"""
res = self.data_payload.copy()
res.update(self.meta_payload)
return res
def __getitem__(self, key):
if key in self.METADATA_KEYS:
return self.meta_payload[key]
else:
return self.data_payload[key]
def __setitem__(self, key, value):
if key in self.DUPLICATE_KEYS:
self.data_payload[key] = value
self.meta_payload[key] = value
elif key in self.METADATA_KEYS:
self.meta_payload[key] = value
else:
self.data_payload[key] = value
def __delitem__(self, key):
if key in self.DUPLICATE_KEYS:
del self.data_payload[key]
del self.meta_payload[key]
elif key in self.METADATA_KEYS:
del self.meta_payload[key]
else:
del self.data_payload[key]
def __iter__(self):
for item in self.data_payload:
yield item
for item in self.meta_payload:
yield item
def __len__(self):
return len(self.data_payload) + len(self.meta_payload)
def emit(self, log, config, emitters, continue_running, merge_payloads=True):
"""
Send payloads via the emitters.
:param merge_payloads: merge data and metadata payloads in a single payload and submit it
to the common endpoint
:type merge_payloads: boolean
"""
statuses = []
def _emit_payload(payload, endpoint):
""" Send the payload via the emitters. """
statuses = []
for emitter in emitters:
                # Don't try to send to an emitter if we're stopping.
if not continue_running:
return statuses
name = emitter.__name__
emitter_status = EmitterStatus(name)
try:
emitter(payload, log, config, endpoint)
except Exception, e:
log.exception("Error running emitter: %s"
% emitter.__name__)
emitter_status = EmitterStatus(name, e)
statuses.append(emitter_status)
return statuses
if merge_payloads:
statuses.extend(_emit_payload(self.payload, self.COMMON_ENDPOINT))
else:
statuses.extend(_emit_payload(self.data_payload, self.DATA_ENDPOINT))
statuses.extend(_emit_payload(self.meta_payload, self.METADATA_ENDPOINT))
return statuses
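# Illustrative sketch (not part of the original module) of how AgentPayload
# routes keys, based on the METADATA_KEYS / DUPLICATE_KEYS sets above:
#
#   p = AgentPayload()
#   p['metrics'] = []        # plain key -> data payload
#   p['systemStats'] = {}    # metadata key -> metadata payload
#   p['apiKey'] = 'abc'      # duplicate key -> written to both payloads
#   assert len(p) == 4       # 2 keys in data + 2 keys in metadata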
class Collector(object):
"""
The collector is responsible for collecting data from each check and
passing it along to the emitters, who send it to their final destination.
"""
def __init__(self, agentConfig, emitters, systemStats, hostname):
self.emit_duration = None
self.agentConfig = agentConfig
self.hostname = hostname
# system stats is generated by config.get_system_stats
self.agentConfig['system_stats'] = systemStats
# agent config is used during checks, system_stats can be accessed through the config
self.os = get_os()
self.plugins = None
self.emitters = emitters
self.check_timings = agentConfig.get('check_timings')
self.push_times = {
'host_metadata': {
'start': time.time(),
'interval': int(agentConfig.get('metadata_interval', 4 * 60 * 60))
},
'external_host_tags': {
'start': time.time() - 3 * 60, # Wait for the checks to init
'interval': int(agentConfig.get('external_host_tags', 5 * 60))
},
'agent_checks': {
'start': time.time(),
'interval': int(agentConfig.get('agent_checks_interval', 10 * 60))
},
'dd_check_tags': {
'start': time.time(),
'interval': int(agentConfig.get('dd_check_tags_interval', 10 * 60))
},
}
socket.setdefaulttimeout(15)
self.run_count = 0
self.continue_running = True
self.hostname_metadata_cache = None
self.initialized_checks_d = []
self.init_failed_checks_d = {}
# Unix System Checks
self._unix_system_checks = {
'io': u.IO(log),
'load': u.Load(log),
'memory': u.Memory(log),
'processes': u.Processes(log),
'cpu': u.Cpu(log),
'system': u.System(log)
}
        # Win32 System Checks
self._win32_system_checks = {
'io': w32.IO(log),
'proc': w32.Processes(log),
'memory': w32.Memory(log),
'network': w32.Network(log),
'cpu': w32.Cpu(log)
}
# Old-style metric checks
self._ganglia = Ganglia(log)
self._dogstream = Dogstreams.init(log, self.agentConfig)
self._ddforwarder = DdForwarder(log, self.agentConfig)
# Agent performance metrics check
self._agent_metrics = None
self._metrics_checks = []
# Custom metric checks
for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
if len(module_spec) == 0:
continue
try:
self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
log.info("Registered custom check %s" % module_spec)
log.warning("Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version")
except Exception, e:
log.exception('Unable to load custom check module %s' % module_spec)
# Resource Checks
self._resources_checks = [
ResProcesses(log, self.agentConfig)
]
def stop(self):
"""
Tell the collector to stop at the next logical point.
"""
# This is called when the process is being killed, so
# try to stop the collector as soon as possible.
# Most importantly, don't try to submit to the emitters
# because the forwarder is quite possibly already killed
# in which case we'll get a misleading error in the logs.
# Best to not even try.
self.continue_running = False
for check in self.initialized_checks_d:
check.stop()
@staticmethod
def _stats_for_display(raw_stats):
return pprint.pformat(raw_stats, indent=4)
@log_exceptions(log)
def run(self, checksd=None, start_event=True, configs_reloaded=False):
"""
Collect data from each check and submit their data.
"""
log.debug("Found {num_checks} checks".format(num_checks=len(checksd['initialized_checks'])))
timer = Timer()
if not Platform.is_windows():
cpu_clock = time.clock()
self.run_count += 1
log.debug("Starting collection run #%s" % self.run_count)
if checksd:
self.initialized_checks_d = checksd['initialized_checks'] # is a list of AgentCheck instances
self.init_failed_checks_d = checksd['init_failed_checks'] # is of type {check_name: {error, traceback}}
payload = AgentPayload()
# Find the AgentMetrics check and pop it out
# This check must run at the end of the loop to collect info on agent performance
if not self._agent_metrics or configs_reloaded:
for check in self.initialized_checks_d:
if check.name == AGENT_METRICS_CHECK_NAME:
self._agent_metrics = check
self.initialized_checks_d.remove(check)
break
# Initialize payload
self._build_payload(payload)
metrics = payload['metrics']
events = payload['events']
service_checks = payload['service_checks']
# Run the system checks. Checks will depend on the OS
if Platform.is_windows():
# Win32 system checks
try:
metrics.extend(self._win32_system_checks['memory'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['cpu'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['network'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['io'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['proc'].check(self.agentConfig))
except Exception:
log.exception('Unable to fetch Windows system metrics.')
else:
# Unix system checks
sys_checks = self._unix_system_checks
load = sys_checks['load'].check(self.agentConfig)
payload.update(load)
system = sys_checks['system'].check(self.agentConfig)
payload.update(system)
memory = sys_checks['memory'].check(self.agentConfig)
if memory:
memstats = {
'memPhysUsed': memory.get('physUsed'),
'memPhysPctUsable': memory.get('physPctUsable'),
'memPhysFree': memory.get('physFree'),
'memPhysTotal': memory.get('physTotal'),
'memPhysUsable': memory.get('physUsable'),
'memSwapUsed': memory.get('swapUsed'),
'memSwapFree': memory.get('swapFree'),
'memSwapPctFree': memory.get('swapPctFree'),
'memSwapTotal': memory.get('swapTotal'),
'memCached': memory.get('physCached'),
'memBuffers': memory.get('physBuffers'),
'memShared': memory.get('physShared')
}
payload.update(memstats)
ioStats = sys_checks['io'].check(self.agentConfig)
if ioStats:
payload['ioStats'] = ioStats
processes = sys_checks['processes'].check(self.agentConfig)
payload.update({'processes': processes})
cpuStats = sys_checks['cpu'].check(self.agentConfig)
if cpuStats:
payload.update(cpuStats)
# Run old-style checks
gangliaData = self._ganglia.check(self.agentConfig)
dogstreamData = self._dogstream.check(self.agentConfig)
ddforwarderData = self._ddforwarder.check(self.agentConfig)
if gangliaData is not False and gangliaData is not None:
payload['ganglia'] = gangliaData
# dogstream
if dogstreamData:
dogstreamEvents = dogstreamData.get('dogstreamEvents', None)
if dogstreamEvents:
if 'dogstream' in payload['events']:
events['dogstream'].extend(dogstreamEvents)
else:
events['dogstream'] = dogstreamEvents
del dogstreamData['dogstreamEvents']
payload.update(dogstreamData)
# metrics about the forwarder
if ddforwarderData:
payload['datadog'] = ddforwarderData
# Resources checks
if not Platform.is_windows():
has_resource = False
for resources_check in self._resources_checks:
resources_check.check()
snaps = resources_check.pop_snapshots()
if snaps:
has_resource = True
res_value = {
'snaps': snaps,
'format_version': resources_check.get_format_version()
}
res_format = resources_check.describe_format_if_needed()
if res_format is not None:
res_value['format_description'] = res_format
payload['resources'][resources_check.RESOURCE_KEY] = res_value
if has_resource:
payload['resources']['meta'] = {
'api_key': self.agentConfig['api_key'],
'host': payload['internalHostname'],
}
# newer-style checks (not checks.d style)
for metrics_check in self._metrics_checks:
res = metrics_check.check(self.agentConfig)
if res:
metrics.extend(res)
# checks.d checks
check_statuses = []
for check in self.initialized_checks_d:
if not self.continue_running:
return
log.info("Running check %s" % check.name)
instance_statuses = []
metric_count = 0
event_count = 0
service_check_count = 0
check_start_time = time.time()
            check_stats = None
            current_check_metadata = []
try:
# Run the check.
instance_statuses = check.run()
# Collect the metrics and events.
current_check_metrics = check.get_metrics()
current_check_events = check.get_events()
check_stats = check._get_internal_profiling_stats()
# Collect metadata
current_check_metadata = check.get_service_metadata()
# Save metrics & events for the payload.
metrics.extend(current_check_metrics)
if current_check_events:
if check.name not in events:
events[check.name] = current_check_events
else:
events[check.name] += current_check_events
# Save the status of the check.
metric_count = len(current_check_metrics)
event_count = len(current_check_events)
except Exception:
log.exception("Error running check %s" % check.name)
check_status = CheckStatus(
check.name, instance_statuses, metric_count,
event_count, service_check_count, service_metadata=current_check_metadata,
library_versions=check.get_library_info(),
source_type_name=check.SOURCE_TYPE_NAME or check.name,
check_stats=check_stats
)
# Service check for Agent checks failures
service_check_tags = ["check:%s" % check.name]
if check_status.status == STATUS_OK:
status = AgentCheck.OK
elif check_status.status == STATUS_ERROR:
status = AgentCheck.CRITICAL
check.service_check('datadog.agent.check_status', status, tags=service_check_tags)
# Collect the service checks and save them in the payload
current_check_service_checks = check.get_service_checks()
if current_check_service_checks:
service_checks.extend(current_check_service_checks)
service_check_count = len(current_check_service_checks)
# Update the check status with the correct service_check_count
check_status.service_check_count = service_check_count
check_statuses.append(check_status)
check_run_time = time.time() - check_start_time
log.debug("Check %s ran in %.2f s" % (check.name, check_run_time))
            # Instrument check run timings if enabled.
if self.check_timings:
metric = 'datadog.agent.check_run_time'
meta = {'tags': ["check:%s" % check.name]}
metrics.append((metric, time.time(), check_run_time, meta))
for check_name, info in self.init_failed_checks_d.iteritems():
if not self.continue_running:
return
check_status = CheckStatus(check_name, None, None, None, None,
init_failed_error=info['error'],
init_failed_traceback=info['traceback'])
check_statuses.append(check_status)
# Add a service check for the agent
service_checks.append(create_service_check('datadog.agent.up', AgentCheck.OK,
hostname=self.hostname))
# Store the metrics and events in the payload.
payload['metrics'] = metrics
payload['events'] = events
payload['service_checks'] = service_checks
# Populate metadata
self._populate_payload_metadata(payload, check_statuses, start_event)
collect_duration = timer.step()
if self._agent_metrics:
metric_context = {
'collection_time': collect_duration,
'emit_time': self.emit_duration,
}
if not Platform.is_windows():
metric_context['cpu_time'] = time.clock() - cpu_clock
self._agent_metrics.set_metric_context(payload, metric_context)
self._agent_metrics.run()
agent_stats = self._agent_metrics.get_metrics()
payload['metrics'].extend(agent_stats)
if self.agentConfig.get('developer_mode'):
log.debug("\n Agent developer mode stats: \n {0}".format(
Collector._stats_for_display(agent_stats))
)
# Let's send our payload
emitter_statuses = payload.emit(log, self.agentConfig, self.emitters,
self.continue_running)
self.emit_duration = timer.step()
# Persist the status of the collection run.
try:
CollectorStatus(check_statuses, emitter_statuses,
self.hostname_metadata_cache).persist()
except Exception:
log.exception("Error persisting collector status")
if self.run_count <= FLUSH_LOGGING_INITIAL or self.run_count % FLUSH_LOGGING_PERIOD == 0:
log.info("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
if self.run_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, next flushes will be logged every %s flushes." %
FLUSH_LOGGING_PERIOD)
else:
log.debug("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
return payload
@staticmethod
def run_single_check(check, verbose=True):
log.info("Running check %s" % check.name)
instance_statuses = []
metric_count = 0
event_count = 0
service_check_count = 0
check_start_time = time.time()
check_stats = None
try:
# Run the check.
instance_statuses = check.run()
# Collect the metrics and events.
current_check_metrics = check.get_metrics()
current_check_events = check.get_events()
current_service_checks = check.get_service_checks()
current_service_metadata = check.get_service_metadata()
check_stats = check._get_internal_profiling_stats()
# Save the status of the check.
metric_count = len(current_check_metrics)
event_count = len(current_check_events)
service_check_count = len(current_service_checks)
print "Metrics: \n{0}".format(pprint.pformat(current_check_metrics))
print "Events: \n{0}".format(pprint.pformat(current_check_events))
print "Service Checks: \n{0}".format(pprint.pformat(current_service_checks))
print "Service Metadata: \n{0}".format(pprint.pformat(current_service_metadata))
except Exception:
log.exception("Error running check %s" % check.name)
check_status = CheckStatus(
check.name, instance_statuses, metric_count,
event_count, service_check_count,
library_versions=check.get_library_info(),
source_type_name=check.SOURCE_TYPE_NAME or check.name,
check_stats=check_stats
)
return check_status
def _emit(self, payload):
""" Send the payload via the emitters. """
statuses = []
for emitter in self.emitters:
            # Don't try to send to an emitter if we're stopping.
if not self.continue_running:
return statuses
name = emitter.__name__
emitter_status = EmitterStatus(name)
try:
emitter(payload, log, self.agentConfig)
except Exception, e:
log.exception("Error running emitter: %s" % emitter.__name__)
emitter_status = EmitterStatus(name, e)
statuses.append(emitter_status)
return statuses
def _is_first_run(self):
return self.run_count <= 1
def _build_payload(self, payload):
"""
Build the payload skeleton, so it contains all of the generic payload data.
"""
now = time.time()
payload['collection_timestamp'] = now
payload['os'] = self.os
payload['python'] = sys.version
payload['agentVersion'] = self.agentConfig['version']
payload['apiKey'] = self.agentConfig['api_key']
payload['events'] = {}
payload['metrics'] = []
payload['service_checks'] = []
payload['resources'] = {}
payload['internalHostname'] = self.hostname
payload['uuid'] = get_uuid()
payload['host-tags'] = {}
payload['external_host_tags'] = {}
def _populate_payload_metadata(self, payload, check_statuses, start_event=True):
"""
Periodically populate the payload with metadata related to the system, host, and/or checks.
"""
now = time.time()
# Include system stats on first postback
if start_event and self._is_first_run():
payload['systemStats'] = self.agentConfig.get('system_stats', {})
# Also post an event in the newsfeed
payload['events']['System'] = [{
'api_key': self.agentConfig['api_key'],
'host': payload['internalHostname'],
'timestamp': now,
'event_type':'Agent Startup',
'msg_text': 'Version %s' % get_version()
}]
# Periodically send the host metadata.
if self._should_send_additional_data('host_metadata'):
# gather metadata with gohai
try:
if not Platform.is_windows():
command = "gohai"
else:
command = "gohai\gohai.exe"
gohai_metadata, gohai_err, _ = get_subprocess_output([command], log)
payload['gohai'] = gohai_metadata
if gohai_err:
log.warning("GOHAI LOG | {0}".format(gohai_err))
except OSError as e:
if e.errno == 2: # file not found, expected when install from source
log.info("gohai file not found")
else:
raise e
except Exception as e:
log.warning("gohai command failed with error %s" % str(e))
payload['systemStats'] = get_system_stats()
payload['meta'] = self._get_hostname_metadata()
self.hostname_metadata_cache = payload['meta']
# Add static tags from the configuration file
host_tags = []
if self.agentConfig['tags'] is not None:
host_tags.extend([unicode(tag.strip())
for tag in self.agentConfig['tags'].split(",")])
if self.agentConfig['collect_ec2_tags']:
host_tags.extend(EC2.get_tags(self.agentConfig))
if host_tags:
payload['host-tags']['system'] = host_tags
GCE_tags = GCE.get_tags(self.agentConfig)
if GCE_tags is not None:
payload['host-tags'][GCE.SOURCE_TYPE_NAME] = GCE_tags
# Log the metadata on the first run
if self._is_first_run():
log.info("Hostnames: %s, tags: %s" %
(repr(self.hostname_metadata_cache), payload['host-tags']))
# Periodically send extra hosts metadata (vsphere)
        # Metadata for hosts other than the one the agent runs on;
        # not all checks use this.
external_host_tags = []
if self._should_send_additional_data('external_host_tags'):
for check in self.initialized_checks_d:
try:
getter = getattr(check, 'get_external_host_tags')
check_tags = getter()
external_host_tags.extend(check_tags)
except AttributeError:
pass
if external_host_tags:
payload['external_host_tags'] = external_host_tags
# Periodically send agent_checks metadata
if self._should_send_additional_data('agent_checks'):
# Add agent checks statuses and error/warning messages
agent_checks = []
for check in check_statuses:
if check.instance_statuses is not None:
for i, instance_status in enumerate(check.instance_statuses):
agent_checks.append(
(
check.name, check.source_type_name,
instance_status.instance_id,
instance_status.status,
# put error message or list of warning messages in the same field
# it will be handled by the UI
instance_status.error or instance_status.warnings or "",
check.service_metadata[i]
)
)
else:
agent_checks.append(
(
check.name, check.source_type_name,
"initialization",
check.status, repr(check.init_failed_error)
)
)
payload['agent_checks'] = agent_checks
payload['meta'] = self.hostname_metadata_cache # add hostname metadata
# If required by the user, let's create the dd_check:xxx host tags
if self.agentConfig['create_dd_check_tags'] and \
self._should_send_additional_data('dd_check_tags'):
app_tags_list = [DD_CHECK_TAG.format(c.name) for c in self.initialized_checks_d]
app_tags_list.extend([DD_CHECK_TAG.format(cname) for cname
in JMXFiles.get_jmx_appnames()])
if 'system' not in payload['host-tags']:
payload['host-tags']['system'] = []
payload['host-tags']['system'].extend(app_tags_list)
def _get_hostname_metadata(self):
"""
        Returns a dictionary that contains hostname metadata.
"""
metadata = EC2.get_metadata(self.agentConfig)
if metadata.get('hostname'):
metadata['ec2-hostname'] = metadata.get('hostname')
del metadata['hostname']
if self.agentConfig.get('hostname'):
metadata['agent-hostname'] = self.agentConfig.get('hostname')
else:
try:
metadata["socket-hostname"] = socket.gethostname()
except Exception:
pass
try:
metadata["socket-fqdn"] = socket.getfqdn()
except Exception:
pass
metadata["hostname"] = self.hostname
metadata["timezones"] = time.tzname
# Add cloud provider aliases
host_aliases = GCE.get_host_aliases(self.agentConfig)
if host_aliases:
metadata['host_aliases'] = host_aliases
return metadata
def _should_send_additional_data(self, data_name):
if self._is_first_run():
return True
# If the interval has passed, send the metadata again
now = time.time()
if now - self.push_times[data_name]['start'] >= self.push_times[data_name]['interval']:
log.debug('%s interval has passed. Sending it.' % data_name)
self.push_times[data_name]['start'] = now
return True
return False
|
|
'''
Carousel
========
.. versionadded:: 1.4.0
The :class:`Carousel` widget provides the classic mobile-friendly carousel view
where you can swipe between slides.
You can add any content to the carousel and have it move horizontally or
vertically.
The carousel can display pages in a loop or not.
Example::
from kivy.app import App
from kivy.uix.carousel import Carousel
from kivy.uix.image import AsyncImage
class CarouselApp(App):
def build(self):
carousel = Carousel(direction='right')
for i in range(10):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = AsyncImage(source=src, allow_stretch=True)
carousel.add_widget(image)
return carousel
CarouselApp().run()
.. versionchanged:: 1.5.0
The carousel now supports active children, like the
:class:`~kivy.uix.scrollview.ScrollView`. It will detect a swipe gesture
according to the :attr:`Carousel.scroll_timeout` and
:attr:`Carousel.scroll_distance` properties.
In addition, the slide container is no longer exposed by the API.
The impacted properties are
:attr:`Carousel.slides`, :attr:`Carousel.current_slide`,
:attr:`Carousel.previous_slide` and :attr:`Carousel.next_slide`.
'''
__all__ = ('Carousel', )
from functools import partial
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.animation import Animation
from kivy.uix.stencilview import StencilView
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import BooleanProperty, OptionProperty, AliasProperty, \
NumericProperty, ListProperty, ObjectProperty, StringProperty
class Carousel(StencilView):
'''Carousel class. See module documentation for more information.
'''
slides = ListProperty([])
'''List of slides inside the Carousel. The slides are the
widgets added to the Carousel using the :attr:`add_widget` method.
:attr:`slides` is a :class:`~kivy.properties.ListProperty` and is
read-only.
'''
def _get_slides_container(self):
return [x.parent for x in self.slides]
slides_container = AliasProperty(_get_slides_container, None,
bind=('slides', ))
direction = OptionProperty('right',
options=('right', 'left', 'top', 'bottom'))
'''Specifies the direction in which the slides are ordered. This
corresponds to the direction from which the user swipes to go from one
slide to the next. It
can be `right`, `left`, `top`, or `bottom`. For example, with
the default value of `right`, the second slide is to the right
of the first and the user would swipe from the right towards the
left to get to the second slide.
:attr:`direction` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'right'.
'''
min_move = NumericProperty(0.2)
'''Defines the minimum distance to be covered before the touch is
considered a swipe gesture and the Carousel content changed.
This is a percentage of the Carousel width.
If the movement doesn't reach this minimum value, then the movement is
cancelled and the content is restored to its original position.
:attr:`min_move` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.2.
'''
anim_move_duration = NumericProperty(0.5)
'''Defines the duration of the Carousel animation between pages.
:attr:`anim_move_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.5.
'''
anim_cancel_duration = NumericProperty(0.3)
'''Defines the duration of the animation when a swipe movement is not
accepted. This is generally when the user does not make a large enough
swipe. See :attr:`min_move`.
:attr:`anim_cancel_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.3.
'''
loop = BooleanProperty(False)
'''Allow the Carousel to loop infinitely. When the user tries to swipe
beyond last page, it will return to the first.
:attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_index(self):
if self.slides:
return self._index % len(self.slides)
return None
def _set_index(self, value):
if self.slides:
self._index = value % len(self.slides)
else:
self._index = None
index = AliasProperty(_get_index, _set_index, bind=('_index', 'slides'))
'''Get/Set the current slide based on the index.
:attr:`index` is an :class:`~kivy.properties.AliasProperty` and defaults
to 0 (the first item).
'''
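    # Illustrative note (not part of the original source): the setter wraps
    # the assigned value modulo the number of slides, so with three slides
    # `carousel.index = 4` leaves `carousel.index == 1`.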
def _prev_slide(self):
slides = self.slides
len_slides = len(slides)
index = self.index
if len_slides < 2: # None, or 1 slide
return None
if len_slides == 2:
if index == 0:
return None
if index == 1:
return slides[0]
if self.loop and index == 0:
return slides[-1]
if index > 0:
return slides[index - 1]
previous_slide = AliasProperty(_prev_slide, None, bind=('slides', 'index'))
'''The previous slide in the Carousel. It is None if the current slide is
the first slide in the Carousel. This ordering reflects the order in which
the slides are added: their presentation varies according to the
:attr:`direction` property.
:attr:`previous_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
This property no longer exposes the slides container. It returns
the widget you have added.
'''
def _curr_slide(self):
if len(self.slides):
return self.slides[self.index]
current_slide = AliasProperty(_curr_slide, None, bind=('slides', 'index'))
'''The currently shown slide.
:attr:`current_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property no longer exposes the slides container. It returns
the widget you have added.
'''
def _next_slide(self):
if len(self.slides) < 2: # None, or 1 slide
return None
if len(self.slides) == 2:
if self.index == 0:
return self.slides[1]
if self.index == 1:
return None
if self.loop and self.index == len(self.slides) - 1:
return self.slides[0]
if self.index < len(self.slides) - 1:
return self.slides[self.index + 1]
next_slide = AliasProperty(_next_slide, None, bind=('slides', 'index'))
'''The next slide in the Carousel. It is None if the current slide is
the last slide in the Carousel. This ordering reflects the order in which
the slides are added: their presentation varies according to the
:attr:`direction` property.
:attr:`next_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property no longer exposes the slides container.
It returns the widget you have added.
'''
scroll_timeout = NumericProperty(200)
'''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.
If the user has not moved :attr:`scroll_distance` within the timeout,
no scrolling will occur and the touch event will go to the children.
:attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and
defaults to 200 (milliseconds)
.. versionadded:: 1.5.0
'''
scroll_distance = NumericProperty('20dp')
'''Distance to move before scrolling the :class:`Carousel` in pixels. As
soon as the distance has been traveled, the :class:`Carousel` will start
to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and
defaults to 20dp.
.. versionadded:: 1.5.0
'''
anim_type = StringProperty('out_quad')
'''Type of animation to use while animating in the next/previous slide.
.. versionadded:: 1.8.0
'''
#### private properties, for internal use only ####
_index = NumericProperty(0, allownone=True)
_prev = ObjectProperty(None, allownone=True)
_current = ObjectProperty(None, allownone=True)
_next = ObjectProperty(None, allownone=True)
_offset = NumericProperty(0)
_touch = ObjectProperty(None, allownone=True)
def __init__(self, **kwargs):
self._trigger_position_visible_slides = Clock.create_trigger(
self._position_visible_slides, -1)
super(Carousel, self).__init__(**kwargs)
self._skip_slide = None
def load_slide(self, slide):
'''Animate to the slide that is passed as the argument.
.. versionchanged:: 1.8.0
'''
slides = self.slides
start, stop = slides.index(self.current_slide), slides.index(slide)
if start == stop:
return
self._skip_slide = stop
if stop > start:
self._insert_visible_slides(_next_slide=slide)
self.load_next()
else:
self._insert_visible_slides(_prev_slide=slide)
self.load_previous()
def load_previous(self):
'''Animate to the previous slide.
.. versionadded:: 1.7.0
'''
self.load_next(mode='prev')
def load_next(self, mode='next'):
'''Animate to the next slide.
.. versionadded:: 1.7.0
'''
if self.index is not None:
w, h = self.size
_direction = {
'top': -h / 2,
'bottom': h / 2,
'left': w / 2,
'right': -w / 2}
_offset = _direction[self.direction]
if mode == 'prev':
_offset = -_offset
self._start_animation(min_move=0, offset=_offset)
def get_slide_container(self, slide):
return slide.parent
def _insert_visible_slides(self, _next_slide=None, _prev_slide=None):
get_slide_container = self.get_slide_container
previous_slide = _prev_slide if _prev_slide else self.previous_slide
if previous_slide:
self._prev = get_slide_container(previous_slide)
else:
self._prev = None
current_slide = self.current_slide
if current_slide:
self._current = get_slide_container(current_slide)
else:
self._current = None
next_slide = _next_slide if _next_slide else self.next_slide
if next_slide:
self._next = get_slide_container(next_slide)
else:
self._next = None
super_remove = super(Carousel, self).remove_widget
for container in self.slides_container:
super_remove(container)
if self._prev:
super(Carousel, self).add_widget(self._prev)
if self._next:
super(Carousel, self).add_widget(self._next)
if self._current:
super(Carousel, self).add_widget(self._current)
def _position_visible_slides(self, *args):
slides, index = self.slides, self.index
no_of_slides = len(slides) - 1
if not slides:
return
x, y, width, height = self.x, self.y, self.width, self.height
_offset, direction = self._offset, self.direction
_prev, _next, _current = self._prev, self._next, self._current
get_slide_container = self.get_slide_container
last_slide = get_slide_container(slides[-1])
first_slide = get_slide_container(slides[0])
skip_next = False
_loop = self.loop
if direction[0] in ['r', 'l']:
xoff = x + _offset
x_prev = {'l': xoff + width, 'r': xoff - width}
x_next = {'l': xoff - width, 'r': xoff + width}
if _prev:
_prev.pos = (x_prev[direction[0]], y)
elif _loop and _next and index == 0:
# if the first slide is moving to the right with direction set to 'right',
# or to the left with direction set to 'left'
if ((_offset > 0 and direction[0] == 'r') or
(_offset < 0 and direction[0] == 'l')):
# put last_slide before first slide
last_slide.pos = (x_prev[direction[0]], y)
skip_next = True
if _current:
_current.pos = (xoff, y)
if skip_next:
return
if _next:
_next.pos = (x_next[direction[0]], y)
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction[0] == 'r') or
(_offset > 0 and direction[0] == 'l')):
first_slide.pos = (x_next[direction[0]], y)
if direction[0] in ['t', 'b']:
yoff = y + _offset
y_prev = {'t': yoff - height, 'b': yoff + height}
y_next = {'t': yoff + height, 'b': yoff - height}
if _prev:
_prev.pos = (x, y_prev[direction[0]])
elif _loop and _next and index == 0:
if ((_offset > 0 and direction[0] == 't') or
(_offset < 0 and direction[0] == 'b')):
last_slide.pos = (x, y_prev[direction[0]])
skip_next = True
if _current:
_current.pos = (x, yoff)
if skip_next:
return
if _next:
_next.pos = (x, y_next[direction[0]])
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction[0] == 't') or
(_offset > 0 and direction[0] == 'b')):
first_slide.pos = (x, y_next[direction[0]])
def on_size(self, *args):
size = self.size
for slide in self.slides_container:
slide.size = size
self._trigger_position_visible_slides()
def on_pos(self, *args):
self._trigger_position_visible_slides()
def on_index(self, *args):
self._insert_visible_slides()
self._trigger_position_visible_slides()
self._offset = 0
def on_slides(self, *args):
if self.slides:
self.index = self.index % len(self.slides)
self._insert_visible_slides()
self._trigger_position_visible_slides()
def on__offset(self, *args):
self._trigger_position_visible_slides()
# if reached full offset, switch index to next or prev
direction = self.direction
_offset = self._offset
width = self.width
height = self.height
index = self.index
if self._skip_slide is not None or index is None:
return
if direction[0] == 'r':
if _offset <= -width:
index += 1
if _offset >= width:
index -= 1
if direction[0] == 'l':
if _offset <= -width:
index -= 1
if _offset >= width:
index += 1
if direction[0] == 't':
if _offset <= - height:
index += 1
if _offset >= height:
index -= 1
if direction[0] == 'b':
if _offset <= -height:
index -= 1
if _offset >= height:
index += 1
self.index = index
def _start_animation(self, *args, **kwargs):
# compute target offset for ease back, next or prev
new_offset = 0
direction = kwargs.get('direction', self.direction)
is_horizontal = direction[0] in ['r', 'l']
extent = self.width if is_horizontal else self.height
min_move = kwargs.get('min_move', self.min_move)
_offset = kwargs.get('offset', self._offset)
if _offset < min_move * -extent:
new_offset = -extent
elif _offset > min_move * extent:
new_offset = extent
# if new_offset is 0, the move wasn't large enough to go to the next/prev slide
dur = self.anim_move_duration
if new_offset == 0:
dur = self.anim_cancel_duration
# detect edge cases if not looping
len_slides = len(self.slides)
index = self.index
if not self.loop or len_slides == 1:
is_first = (index == 0)
is_last = (index == len_slides - 1)
if direction[0] in ['r', 't']:
towards_prev = (new_offset > 0)
towards_next = (new_offset < 0)
else:
towards_prev = (new_offset < 0)
towards_next = (new_offset > 0)
if (is_first and towards_prev) or (is_last and towards_next):
new_offset = 0
anim = Animation(_offset=new_offset, d=dur, t=self.anim_type)
anim.cancel_all(self)
def _cmp(*l):
if self._skip_slide is not None:
self.index = self._skip_slide
self._skip_slide = None
anim.bind(on_complete=_cmp)
anim.start(self)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
touch.ud[self._get_uid('cavoid')] = True
return
if self.disabled:
return True
if self._touch:
return super(Carousel, self).on_touch_down(touch)
Animation.cancel_all(self)
self._touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'time': touch.time_start}
Clock.schedule_once(self._change_touch_mode,
self.scroll_timeout / 1000.)
return True
def on_touch_move(self, touch):
if self._get_uid('cavoid') in touch.ud:
return
if self._touch is not touch:
super(Carousel, self).on_touch_move(touch)
return self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
ud = touch.ud[self._get_uid()]
direction = self.direction
if ud['mode'] == 'unknown':
if direction[0] in ('r', 'l'):
distance = abs(touch.ox - touch.x)
else:
distance = abs(touch.oy - touch.y)
if distance > self.scroll_distance:
Clock.unschedule(self._change_touch_mode)
ud['mode'] = 'scroll'
else:
if direction[0] in ('r', 'l'):
self._offset += touch.dx
if direction[0] in ('t', 'b'):
self._offset += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('cavoid') in touch.ud:
return
if self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
Clock.unschedule(self._change_touch_mode)
super(Carousel, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
self._start_animation()
else:
if self._touch is not touch and self.uid not in touch.ud:
super(Carousel, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(Carousel, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(Carousel, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._touch:
return
self._start_animation()
uid = self._get_uid()
touch = self._touch
ud = touch.ud[uid]
if ud['mode'] == 'unknown':
touch.ungrab(self)
self._touch = None
super(Carousel, self).on_touch_down(touch)
return
def add_widget(self, widget, index=0):
slide = RelativeLayout(size=self.size, x=self.x - self.width, y=self.y)
slide.add_widget(widget)
super(Carousel, self).add_widget(slide, index)
if index != 0:
self.slides.insert(index - len(self.slides), widget)
else:
self.slides.append(widget)
def remove_widget(self, widget, *args, **kwargs):
# XXX be careful: widget.parent refers to the RelativeLayout
# added in add_widget(). This will break if the RelativeLayout
# implementation changes.
# if we passed the real widget
if widget in self.slides:
slide = widget.parent
self.slides.remove(widget)
return slide.remove_widget(widget, *args, **kwargs)
return super(Carousel, self).remove_widget(widget, *args, **kwargs)
def clear_widgets(self):
for slide in self.slides[:]:
self.remove_widget(slide)
super(Carousel, self).clear_widgets()
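# Usage sketch (hypothetical helper, not part of Kivy itself): besides swiping,
# the Carousel can be driven programmatically with the methods defined above.
def _demo_programmatic_navigation(carousel):
    carousel.load_next()                          # animate to the next slide
    carousel.load_previous()                      # animate back to the previous one
    if carousel.slides:
        carousel.load_slide(carousel.slides[-1])  # jump straight to the last slide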
if __name__ == '__main__':
from kivy.app import App
class Example1(App):
def build(self):
carousel = Carousel(direction='left',
loop=True)
for i in range(4):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = Factory.AsyncImage(source=src, allow_stretch=True)
carousel.add_widget(image)
return carousel
Example1().run()
|
|
import struct
import numpy as np
from datasketch import MinHash
class LeanMinHash(MinHash):
'''Lean MinHash is MinHash with a smaller memory footprint
and faster deserialization, but with its internal state frozen
-- no `update()`.
Lean MinHash inherits all methods from :class:`datasketch.MinHash`.
It does not store the `permutations` and the `hashfunc` needed for updating.
If a MinHash does not need further updates, convert it into a lean MinHash
to save memory.
Example:
To create a lean MinHash from an existing MinHash:
.. code-block:: python
lean_minhash = LeanMinHash(minhash)
# You can compute the Jaccard similarity between two lean MinHash
lean_minhash.jaccard(lean_minhash2)
# Or between a lean MinHash and a MinHash
lean_minhash.jaccard(minhash2)
To create a lean MinHash from the hash values and seed of an existing
MinHash:
.. code-block:: python
lean_minhash = LeanMinHash(seed=minhash.seed,
hashvalues=minhash.hashvalues)
To create a MinHash from a lean MinHash:
.. code-block:: python
minhash = MinHash(seed=lean_minhash.seed,
hashvalues=lean_minhash.hashvalues)
# Or if you want to prevent further updates on minhash
# from affecting the state of lean_minhash
minhash = MinHash(seed=lean_minhash.seed,
hashvalues=lean_minhash.digest())
Note:
Lean MinHash can also be used in :class:`datasketch.MinHashLSH`,
:class:`datasketch.MinHashLSHForest`, and :class:`datasketch.MinHashLSHEnsemble`.
Args:
minhash (optional): The :class:`datasketch.MinHash` object used to
initialize the LeanMinHash. If this is not set, then `seed`
and `hashvalues` must be set.
seed (optional): The random seed that controls the set of random
permutation functions generated for this LeanMinHash. This parameter
must be used together with `hashvalues`.
hashvalues (optional): The hash values used to initialize the state
of the LeanMinHash. This parameter must be used together with
`seed`.
'''
__slots__ = ('seed', 'hashvalues')
def _initialize_slots(self, seed, hashvalues):
'''Initialize the slots of the LeanMinHash.
Args:
seed (int): The random seed controls the set of random
permutation functions generated for this LeanMinHash.
hashvalues: The hash values are the internal state of the LeanMinHash.
'''
self.seed = seed
self.hashvalues = self._parse_hashvalues(hashvalues)
def __init__(self, minhash=None, seed=None, hashvalues=None):
if minhash is not None:
self._initialize_slots(minhash.seed, minhash.hashvalues)
elif hashvalues is not None and seed is not None:
self._initialize_slots(seed, hashvalues)
else:
raise ValueError("Init parameters cannot be None: make sure "
"to set either minhash or both of hash values and seed")
def update(self, b):
'''This method is not available on a LeanMinHash.
Calling it raises a TypeError.
'''
raise TypeError("Cannot update a LeanMinHash")
def copy(self):
lmh = object.__new__(LeanMinHash)
lmh._initialize_slots(self.seed, self.hashvalues)
return lmh
def bytesize(self, byteorder='@'):
'''Compute the byte size after serialization.
Args:
byteorder (str, optional): This is byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Returns:
int: Size in number of bytes after serialization.
'''
# Use 8 bytes to store the seed integer
seed_size = struct.calcsize(byteorder+'q')
# Use 4 bytes to store the number of hash values
length_size = struct.calcsize(byteorder+'i')
# Use 4 bytes to store each hash value as we are using the lower 32 bit
hashvalue_size = struct.calcsize(byteorder+'I')
return seed_size + length_size + len(self) * hashvalue_size
def serialize(self, buf, byteorder='@'):
'''
Serialize this lean MinHash and store the result in an allocated buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
byteorder (str, optional): This is byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
This is preferred over using `pickle`_ if the serialized lean MinHash needs
to be used by another program in a different programming language.
The serialization schema:
1. The first 8 bytes are the seed integer
2. The next 4 bytes are the number of hash values
3. The rest are the serialized hash values, 4 bytes each
Example:
To serialize a single lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
buf = bytearray(lean_minhash.bytesize())
lean_minhash.serialize(buf)
To serialize multiple lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
# assuming lean_minhashs is a list of LeanMinHash with the same size
size = lean_minhashs[0].bytesize()
buf = bytearray(size*len(lean_minhashs))
for i, lean_minhash in enumerate(lean_minhashs):
lean_minhash.serialize(buf[i*size:])
.. _`buffer`: https://docs.python.org/3/c-api/buffer.html
.. _`bytearray`: https://docs.python.org/3.6/library/functions.html#bytearray
.. _`byteorder`: https://docs.python.org/3/library/struct.html
'''
if len(buf) < self.bytesize():
raise ValueError("The buffer does not have enough space\
for holding this MinHash.")
fmt = "%sqi%dI" % (byteorder, len(self))
struct.pack_into(fmt, buf, 0,
self.seed, len(self), *self.hashvalues)
@classmethod
def deserialize(cls, buf, byteorder='@'):
'''
Deserialize a lean MinHash from a buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
byteorder (str, optional): This is byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Return:
datasketch.LeanMinHash: The deserialized lean MinHash
Example:
To deserialize a lean MinHash from a buffer.
.. code-block:: python
lean_minhash = LeanMinHash.deserialize(buf)
'''
fmt_seed_size = "%sqi" % byteorder
fmt_hash = byteorder + "%dI"
try:
seed, num_perm = struct.unpack_from(fmt_seed_size, buf, 0)
except TypeError:
seed, num_perm = struct.unpack_from(fmt_seed_size, buffer(buf), 0)
offset = struct.calcsize(fmt_seed_size)
try:
hashvalues = struct.unpack_from(fmt_hash % num_perm, buf, offset)
except TypeError:
hashvalues = struct.unpack_from(fmt_hash % num_perm, buffer(buf), offset)
lmh = object.__new__(LeanMinHash)
lmh._initialize_slots(seed, hashvalues)
return lmh
def __getstate__(self):
buf = bytearray(self.bytesize())
fmt = "qi%dI" % len(self)
struct.pack_into(fmt, buf, 0,
self.seed, len(self), *self.hashvalues)
return buf
def __setstate__(self, buf):
try:
seed, num_perm = struct.unpack_from('qi', buf, 0)
except TypeError:
seed, num_perm = struct.unpack_from('qi', buffer(buf), 0)
offset = struct.calcsize('qi')
try:
hashvalues = struct.unpack_from('%dI' % num_perm, buf, offset)
except TypeError:
hashvalues = struct.unpack_from('%dI' % num_perm, buffer(buf), offset)
self._initialize_slots(seed, hashvalues)
def __hash__(self):
return hash((self.seed, tuple(self.hashvalues)))
@classmethod
def union(cls, *lmhs):
if len(lmhs) < 2:
raise ValueError("Cannot union less than 2 MinHash")
num_perm = len(lmhs[0])
seed = lmhs[0].seed
if any((seed != m.seed or num_perm != len(m)) for m in lmhs):
raise ValueError("The unioning MinHash must have the\
same seed, number of permutation functions.")
hashvalues = np.minimum.reduce([m.hashvalues for m in lmhs])
lmh = object.__new__(LeanMinHash)
lmh._initialize_slots(seed, hashvalues)
return lmh
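# Minimal round-trip sketch using the methods defined above (assumes the
# datasketch package is importable, as it already is for the import at the
# top of this module): build a MinHash, freeze it, serialize it into a
# bytearray and restore it again.
if __name__ == "__main__":
    m = MinHash(num_perm=128)
    for token in (b"alpha", b"beta", b"gamma"):
        m.update(token)
    lean = LeanMinHash(m)
    buf = bytearray(lean.bytesize())
    lean.serialize(buf)
    restored = LeanMinHash.deserialize(buf)
    print(restored.jaccard(lean))  # 1.0 -- the hash values survive the round trip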
|
|
# Copyright (c) 2013 - 2019 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import secrets
from difflib import SequenceMatcher
from http import cookiejar
from multiprocessing import Lock
from typing import Dict, Union, Tuple, Optional
from typing import cast
from urllib.parse import urlparse, urljoin
from urllib.parse import urlunparse
import requests
import urllib3
from requests.adapters import HTTPAdapter
from requests.models import Response, Request, PreparedRequest
from requests_mock.request import _RequestObjectProxy
from validator_collection import checkers
from yawast._version import get_version
from yawast.reporting import reporter
from yawast.shared import output, utils
from yawast.shared.exec_timer import ExecutionTimer
YAWAST_UA = (
f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) "
f"YAWAST/{get_version()}/PY Chrome/77.0.3865.65 Safari/537.36"
)
SERVICE_UA = f"YAWAST/{get_version()}/PY"
_lock = Lock()
# class to block setting cookies from server responses
class _BlockCookiesSet(cookiejar.DefaultCookiePolicy):
def set_ok(self, cookie, request):
return False
_requester = requests.Session()
_file_not_found_handling: Dict[str, Dict[str, Union[bool, Response]]] = {}
def init(proxy: str, cookie: str, header: str) -> None:
global _requester, _file_not_found_handling
_requester.cookies.set_policy(_BlockCookiesSet())
_requester.verify = False
_requester.mount(
"http://",
HTTPAdapter(
max_retries=urllib3.Retry(total=3, read=5, connect=5, backoff_factor=0.3),
pool_maxsize=50,
pool_block=True,
),
)
_requester.mount(
"https://",
HTTPAdapter(
max_retries=urllib3.Retry(total=3, read=5, connect=5, backoff_factor=0.3),
pool_maxsize=50,
pool_block=True,
),
)
if proxy is not None and len(proxy) > 0:
# we have a proxy, set it
if not proxy.startswith("http") and "://" not in proxy:
proxy = f"http://{proxy}"
if proxy.startswith("http"):
proxies = {"http": proxy, "https": proxy}
_requester.proxies.update(proxies)
else:
output.error(
f"Invalid proxy server specified ({proxy}) - only HTTP proxy servers are supported. Proxy ignored."
)
if cookie is not None and len(cookie) > 0:
if ";" in cookie:
cookies = cookie.split(";")
else:
cookies = [cookie]
for current_cookie in cookies:
if "=" in cookie:
name = current_cookie.split("=", 1)[0]
val = current_cookie.split("=", 1)[1]
c = requests.cookies.create_cookie(name=name, value=val)
_requester.cookies.set_cookie(c)
else:
output.error(
f"Invalid cookie specified ({cookie}) - cookie must be in NAME=VALUE format. Ignored."
)
if header is not None and len(header) > 0:
if "=" in header:
name = header.split("=", 1)[0]
val = header.split("=", 1)[1]
_requester.headers.update({name: val})
elif ": " in header:
# in case they use the wire format - not officially supported, but, meh
name = header.split(": ", 1)[0]
val = header.split(": ", 1)[1]
_requester.headers.update({name: val})
else:
output.error(
f"Invalid header specified ({header}) - header must be in NAME=VALUE format. Ignored."
)
_file_not_found_handling = {}
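# Hypothetical usage sketch (values are illustrative only): configure the
# shared session with a local HTTP proxy, a single cookie and one extra
# header before issuing any requests.
def _demo_init_session() -> None:
    init(proxy="127.0.0.1:8080", cookie="session=abc123", header="X-Debug=1")
    # later calls such as http_get(...) go through the configured session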
def reset():
global _requester
_requester = requests.Session()
def http_head(
url: str, allow_redirects: Optional[bool] = True, timeout: Optional[int] = 30
) -> Response:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {"User-Agent": YAWAST_UA}
res = _requester.head(
url, headers=headers, allow_redirects=allow_redirects, timeout=timeout
)
output.debug(
f"{res.request.method}: {url} - completed ({res.status_code}) in "
f"{int(res.elapsed.total_seconds() * 1000)}ms."
)
return res
def http_options(url: str, timeout: Optional[int] = 30) -> Response:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {"User-Agent": YAWAST_UA}
res = _requester.options(url, headers=headers, timeout=timeout)
output.debug(
f"{res.request.method}: {url} - completed ({res.status_code}) in "
f"{int(res.elapsed.total_seconds() * 1000)}ms."
)
return res
def http_get(
url: str,
allow_redirects: Optional[bool] = True,
additional_headers: Union[None, Dict] = None,
timeout: Optional[int] = 30,
) -> Response:
max_size = 5 * 1024 * 1024 # 5MB
chunk_size = 10 * 1024 # 10KB - this is the default used by requests
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {"User-Agent": YAWAST_UA}
if additional_headers is not None:
headers = {**headers, **additional_headers}
res = _requester.get(
url,
headers=headers,
allow_redirects=allow_redirects,
timeout=timeout,
stream=True,
)
# if we have a content-length use that first, as it'll be a faster check
if (
"content-length" in res.headers
and int(res.headers["content-length"]) > max_size
):
raise ValueError(f"File '{url}' exceeds the maximum size of {max_size} bytes.")
length = 0
content = bytes()
for chunk in res.iter_content(chunk_size):
length += len(chunk)
content += chunk
if length > max_size:
raise ValueError(
f"File '{url}' exceeds the maximum size of {max_size} bytes."
)
# hack: set the Response's content directly, as it doesn't keep it in memory if you stream the data
res._content = content
output.debug(
f"{res.request.method}: {url} - completed ({res.status_code}) in "
f"{int(res.elapsed.total_seconds() * 1000)}ms "
f"(Body: {len(res.content)})"
)
return res
def http_put(
url: str,
data: str,
allow_redirects=True,
additional_headers: Union[None, Dict] = None,
timeout: Optional[int] = 30,
) -> Response:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {"User-Agent": YAWAST_UA}
if additional_headers is not None:
headers = {**headers, **additional_headers}
res = _requester.put(
url,
data=data,
headers=headers,
allow_redirects=allow_redirects,
timeout=timeout,
)
output.debug(
f"{res.request.method}: {url} - completed ({res.status_code}) in "
f"{int(res.elapsed.total_seconds() * 1000)}ms "
f"(Body: {len(res.content)})"
)
return res
def http_custom(
verb: str,
url: str,
additional_headers: Union[None, Dict] = None,
timeout: Optional[int] = 30,
) -> Response:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {"User-Agent": YAWAST_UA}
if additional_headers is not None:
headers = {**headers, **additional_headers}
res = _requester.request(verb, url, headers=headers, timeout=timeout)
output.debug(
f"{res.request.method}: {url} - completed ({res.status_code}) in "
f"{int(res.elapsed.total_seconds() * 1000)}ms "
f"(Body: {len(res.content)})"
)
return res
def http_json(
url, allow_redirects=True, timeout: Optional[int] = 30
) -> Tuple[Dict, int]:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {"User-Agent": SERVICE_UA}
res = _requester.get(
url, headers=headers, allow_redirects=allow_redirects, timeout=timeout
)
return res.json(), res.status_code
def http_file_exists(
url: str, allow_redirects=True, timeout: Optional[int] = 30
) -> Tuple[bool, Response]:
# first, check our 404 handling
domain = utils.get_domain(url)
_get_404_handling(domain, url)
if _file_not_found_handling[domain]["file"]:
if _file_not_found_handling[domain]["head"]:
# we have good HEAD handling - we will start with head, as it's more efficient for us
head = http_head(url, allow_redirects=allow_redirects, timeout=timeout)
# check for ok, and for server-side errors
if head.status_code == 200 or head.status_code >= 500:
# file exists, grab it
get = http_get(url, allow_redirects=allow_redirects, timeout=timeout)
return True, get
else:
return False, head
else:
# head isn't handled properly, default to GET
get = http_get(url, allow_redirects=allow_redirects, timeout=timeout)
return get.status_code == 200, get
else:
# the server doesn't handle 404s properly - there are a few different flavors of this issue,
# and each flavor requires a different approach
file_res = cast(Response, _file_not_found_handling[domain]["file_res"])
if file_res.status_code == 200:
# in this case, everything gets a 200, even if it doesn't exist
# to handle this, we need to look at the response, and see if we can work out if it's
# a file not found error, or something else.
get = http_get(url, allow_redirects=allow_redirects, timeout=timeout)
if response_body_is_text(file_res):
if response_body_is_text(get):
# in case the responses are the same, check that first, then move on to comparing
# this should be caught by the code below, but this is faster
if file_res.content == get.content:
return False, get
# both are text, so we need to compare to see how similar they are
with ExecutionTimer() as tm:
ratio = SequenceMatcher(None, file_res.text, get.text).ratio()
output.debug(
f"Fuzzy Matching used. Text from known 404 and '{get.url}' compared in {tm.to_ms()}ms"
)
# check to see if we have an alignment of less than 90% between the known 404, and this response
# if it's less than 90%, we will assume that the response is different, and we have a hit
# this is somewhat error prone, as it depends on details of how the application works, though
# most errors should be very similar, so the false positive rate should be low.
if ratio < 0.9:
output.debug(
f"Fuzzy Matching used. Text from known 404 and '{get.url}' have a "
f"similarity of {ratio} - assuming valid file."
)
return True, get
else:
return False, get
else:
# if file_res is text, and this isn't, safe to call this a valid hit
return True, get
else:
# this is a case that makes no sense. who knows what's going on here.
return file_res.content == get.content, get
elif file_res.status_code in range(300, 399):
# they are sending a redirect on file not found
# we can't honor the allow_redirects flag, as we can't tell if it's a legit redirect, or an error
# though we should still get a 200 for valid hits
get = http_get(url, allow_redirects=False, timeout=timeout)
return get.status_code == 200, get
elif file_res.status_code >= 400:
# they are sending an error code that isn't 404 - in this case, we should still get a 200 on a valid hit
get = http_get(url, allow_redirects=allow_redirects, timeout=timeout)
return get.status_code == 200, get
else:
# shrug
get = http_get(url, allow_redirects=allow_redirects, timeout=timeout)
return get.status_code == 200, get
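# Illustrative sketch of the fuzzy-match heuristic used above (the sample
# strings are made up): two bodies that are at least 90% similar are treated
# as the same "not found" page, anything less similar is assumed to be a
# real file.
def _demo_fuzzy_404_match() -> bool:
    known_404 = "<html><body>Sorry, the page /abc123.html was not found.</body></html>"
    candidate = "<html><body><h1>Reports</h1><p>Quarterly numbers are attached.</p></body></html>"
    ratio = SequenceMatcher(None, known_404, candidate).ratio()
    return ratio < 0.9  # True -> bodies differ enough to call this a valid file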
def http_build_raw_response(res: Response) -> str:
if res.raw.version == 11:
res_line = f"HTTP/1.1 {res.raw.status} {res.raw.reason}"
else:
res_line = f"HTTP/1.0 {res.raw.status} {res.raw.reason}"
res_string = res_line + "\r\n"
if res.raw._original_response is not None:
res_string += "\r\n".join(
str(res.raw._original_response.headers).splitlines(False)
)
else:
res_string += "\r\n".join(f"{k}: {v}" for k, v in res.headers.items())
try:
if response_body_is_text(res):
txt = res.text
if txt != "":
res_string += "\r\n\r\n"
res_string += txt
elif len(res.content) > 0:
# the body is binary - no real value in keeping it
res_string += "\r\n\r\n<BINARY DATA EXCLUDED>"
except Exception:
output.debug_exception()
return res_string
def http_build_raw_request(
req: Union[Request, PreparedRequest, _RequestObjectProxy]
) -> str:
if isinstance(req, _RequestObjectProxy):
req = req._request
headers = "\r\n".join(f"{k}: {v}" for k, v in req.headers.items())
body = ""
if req.body is not None:
body = req.body
return f"{req.method} {req.url}\r\n{headers}\r\n\r\n{body}"
def check_404_response(url: str) -> Tuple[bool, Response, bool, Response]:
domain = utils.get_domain(url)
_get_404_handling(domain, url)
return (
_file_not_found_handling[domain]["file"],
_file_not_found_handling[domain]["file_res"],
_file_not_found_handling[domain]["path"],
_file_not_found_handling[domain]["path_res"],
)
def _get_404_handling(domain: str, url: str):
with _lock:
if domain not in _file_not_found_handling:
_file_not_found_handling[domain] = {}
target = utils.extract_url(url)
rnd = secrets.token_hex(12)
file_url = urljoin(target, f"{rnd}.html")
path_url = urljoin(target, f"{rnd}/")
file_res = http_get(file_url, False)
path_res = http_get(path_url, False)
_file_not_found_handling[domain]["file"] = file_res.status_code == 404
_file_not_found_handling[domain]["file_res"] = file_res
_file_not_found_handling[domain]["path"] = path_res.status_code == 404
_file_not_found_handling[domain]["path_res"] = path_res
# check to see if HEAD returns something reasonable
head_res = http_head(file_url, False)
_file_not_found_handling[domain]["head"] = head_res.status_code == 404
_file_not_found_handling[domain]["head_res"] = head_res
def check_ssl_redirect(url):
parsed = urlparse(url)
if parsed.scheme == "https":
return url
req = http_head(url, False)
# make sure we received a redirect response
if 300 <= req.status_code < 400:
location = req.headers.get("location")
if location is None:
return url
try:
parsed_location = urlparse(location)
# this is a special case to handle servers that redirect to a path, and then to HTTPS
if parsed_location.netloc == "" and parsed_location.path != "":
parsed_location = parsed._replace(path=parsed_location.path)
parsed_location = urlparse(
check_ssl_redirect(urlunparse(parsed_location))
)
if parsed_location.scheme == "https":
parsed = parsed._replace(scheme=parsed_location.scheme)
return urlunparse(parsed)
except Exception:
return url
return url
def check_www_redirect(url):
parsed = urlparse(url)
req = http_head(url, False)
# make sure we received a redirect response
if 300 <= req.status_code < 400:
location = req.headers.get("location")
if location is None:
return url
if str(location).startswith("/"):
return url
try:
parsed_location = urlparse(location)
location_domain = utils.get_domain(parsed_location.netloc)
domain = utils.get_domain(parsed.netloc)
if (
domain.startswith("www")
and (not location_domain.startswith("www"))
and location_domain in domain
):
parsed_location = parsed._replace(netloc=parsed_location.netloc)
return urlunparse(parsed_location)
elif (
(not domain.startswith("www"))
and location_domain.startswith("www")
and domain in location_domain
):
parsed_location = parsed._replace(netloc=parsed_location.netloc)
return urlunparse(parsed_location)
except ValueError:
return url
else:
return url
def response_body_is_text(res: Response) -> bool:
"""
Returns True if the body is HTML, or at least seems like text
:param res:
:return:
"""
has_text = False
if len(res.content) == 0:
# don't bother with these, if the body is empty
has_text = False
elif "Content-Type" in res.headers and "text/html" in res.headers["Content-Type"]:
# it's HTML, go
has_text = True
elif "Content-Type" not in res.headers:
# this is something, but the server doesn't tell us what
# so, we will check to see if we can treat it like text
if utils.is_printable_str(res.content):
has_text = True
return has_text
def check_ipv4_connection() -> str:
prefix = "IPv4 -> Internet:"
url = "https://ipv4.icanhazip.com/"
try:
res = _check_connection(url)
if not checkers.is_ipv4(res):
res = "(Unavailable)"
except Exception:
res = "(Unavailable)"
reporter.register_info("ipv4", res)
return f"{prefix} {res}"
def check_ipv6_connection() -> str:
prefix = "IPv6 -> Internet:"
url = "https://ipv6.icanhazip.com/"
try:
res = _check_connection(url)
if not checkers.is_ipv6(res):
res = "(Unavailable)"
except Exception:
res = "(Unavailable)"
reporter.register_info("ipv6", res)
return f"{prefix} {res}"
def _check_connection(url: str) -> str:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
result = "Connection Failed"
try:
headers = {"User-Agent": SERVICE_UA}
res = requests.get(url, headers=headers, verify=False)
result = res.text.strip()
except Exception:
output.debug_exception()
return result
|
|
import pyfits
import traceback
from IPython.display import HTML, display
import matplotlib.pyplot as plt
import radiopadre
import radiopadre.file
from radiopadre.render import render_title, render_table
class FITSFile(radiopadre.file.FileBase):
FITSAxisLabels = dict(STOKES=["I", "Q", "U", "V", "YX", "XY", "YY", "XX",
"LR", "RL", "LL", "RR"],
COMPLEX=["real", "imag", "weight"])
def __init__(self, *args, **kw):
radiopadre.file.FileBase.__init__(self, *args, **kw)
self._ff = self._image_data = None
def open(self):
if not self._ff:
self._ff = pyfits.open(self.fullpath)
return self._ff
def info(self):
hdr = self.open()[0].header
sizes = [str(hdr["NAXIS%d" % i]) for i in range(1, hdr["NAXIS"] + 1)]
axes = [hdr.get("CTYPE%d" % i, str(i))
for i in range(1, hdr["NAXIS"] + 1)]
print(self.path, "x".join(sizes), ",".join(axes))
@staticmethod
def _show_summary(fits_files, title=None, showpath=False):
if not fits_files:
display(HTML("0 files"))
return
if title:
display(HTML(render_title(title)))
data = []
for ff in fits_files:
name = ff.path if showpath else ff.name
size = resolution = axes = "?"
try:
hdr = pyfits.open(ff.fullpath)[0].header
naxis = hdr.get("NAXIS")
size = "×".join(
[str(hdr.get("NAXIS%d" % i)) for i in range(1, naxis + 1)])
axes = ",".join(
[hdr.get("CTYPE%d" % i, "?").split("-", 1)[0] for i in range(1, naxis + 1)])
delt = [abs(hdr.get("CDELT%d" % i, 0)) for i in (1, 2)]
resolution = []
if all(delt):
if delt[0] == delt[1]:
delt = [delt[0]]
for d in delt:
if d >= 1:
resolution.append("%.1f°" % d)
elif d >= 1 / 60.:
resolution.append("%.1f'" % (d * 60))
else:
resolution.append("%.1g\"" % (d * 3600))
resolution = "×°".join(resolution)
except:
traceback.print_exc()
data += [(name, size, resolution, axes, ff.mtime_str)]
display(HTML(render_table(data,
html=("size", "axes", "res"),
labels=("name", "size", "res", "axes", "modified"))))
@staticmethod
def _show_thumbs(fits_files,
width=None,
ncol=None,
maxwidth=None,
mincol=None,
maxcol=None,
title=None,
fs='small',
showpath=False,
**kw):
if not fits_files:
return None
if title:
display(HTML(radiopadre.render_title(title)))
nrow, ncol, width = radiopadre.file.compute_thumb_geometry(len(fits_files),
ncol, mincol,
maxcol, width,
maxwidth)
plt.figure(figsize=(width * ncol, width * nrow), dpi=radiopadre.DPI)
for iplot, ff in enumerate(fits_files):
ax = plt.subplot(nrow, ncol, iplot + 1)
ax.tick_params(labelsize=kw.get('fs_axis', fs))
ff.show(index=[0] * 10,
unroll=None,
filename_in_title=True,
make_figure=False,
fs_title='small', **kw)
def show(self,
index=0,
xyaxes=(0, 1),
unroll='STOKES',
vmin=None,
vmax=None,
cmap='cubehelix',
zoom=None,
width=None,
maxwidth=None,
ncol=None,
mincol=None,
maxcol=None,
fs='medium',
fs_title=None,
fs_axis=None,
fs_colorbar=None,
colorbar=True,
make_figure=True,
filename_in_title=False):
ff = pyfits.open(self.fullpath)
hdr = ff[0].header
# make base slice with ":" for every axis
naxis = hdr['NAXIS']
dims = [hdr['NAXIS%d' % i] for i in range(1, naxis + 1)]
axis_type = [hdr.get("CTYPE%d" % i, str(i))
for i in range(1, hdr["NAXIS"] + 1)]
baseslice = [slice(None)] * hdr['NAXIS']
# create status string
status = "%s (%s,%s)" % (self.path, axis_type[xyaxes[0]].split("-")[0],
axis_type[xyaxes[1]].split("-")[0])
title = self.basename if filename_in_title else ""
# zoom in if asked
if zoom:
x0, y0 = int(dims[xyaxes[0]] / 2), int(dims[xyaxes[1]] / 2)
xz, yz = int(dims[xyaxes[0]] / (zoom * 2)), int(dims[xyaxes[1]] /
(zoom * 2))
xlim = x0 - xz, x0 + xz
ylim = y0 - yz, y0 + yz
status += " zoom x%s" % zoom
else:
xlim = 0, dims[xyaxes[0]] - 1
ylim = 0, dims[xyaxes[1]] - 1
# the set of axes that we need to index into -- remove the XY axes
# first
remaining_axes = set(range(naxis)) - set(xyaxes)
# get axis labels. "1" to "N", unless a special axis like STOKES is
# used
axis_labels = {}
for ax in remaining_axes:
labels = self.FITSAxisLabels.get(axis_type[ax], None)
rval, rpix, delt, unit = [hdr.get("C%s%d" % (kw, ax + 1), 1)
for kw in ("RVAL", "RPIX", "DELT", "UNIT")]
if labels:
axis_labels[ax] = ["%s %s" %
(axis_type[ax], labels[int(rval - 1 + delt *
(i + 1 - rpix))])
for i in range(dims[ax])]
elif unit == 1:
axis_labels[ax] = ["%s %g" % (axis_type[ax], rval + delt *
(i + 1 - rpix))
for i in range(dims[ax])]
else:
axis_labels[ax] = ["%s %g%s" % (axis_type[ax], rval + delt *
(i + 1 - rpix), unit)
for i in range(dims[ax])]
# is there an unroll axis specified
if unroll is not None:
if type(unroll) is str:
unroll = axis_type.index(
unroll) if unroll in axis_type else None
if unroll is not None and dims[unroll] < 2:
unroll = None
if unroll is not None:
if unroll in remaining_axes:
remaining_axes.remove(unroll)
else:
raise ValueError("unknown unroll axis %s" % unroll)
# we need enough elements in index to take care of the remaining axes
index = [index] if type(index) is int else list(index)
for remaxis in sorted(remaining_axes):
if dims[remaxis] == 1:
baseslice[remaxis] = 0
elif not index:
e = "not enough elements in index to index into axis %s" % \
axis_type[remaxis]
raise TypeError(e)
else:
baseslice[remaxis] = i = index.pop(0)
status += " " + (axis_labels[remaxis][i])
title += " " + (axis_labels[remaxis][i])
data = ff[0].data.T
# figure out image geometry and make subplots
nrow, ncol, width = radiopadre.file.compute_thumb_geometry(
1 if unroll is None else dims[unroll],
ncol, mincol, maxcol, width, maxwidth)
if unroll is None:
# show single image
fig = make_figure and plt.figure(figsize=(width, width),
dpi=radiopadre.DPI)
if fig:
plt.suptitle(self.basename)
plt.imshow(
data[tuple(baseslice)].T, vmin=vmin, vmax=vmax, cmap=cmap)
if colorbar:
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fs or fs_colorbar)
plt.xlabel(axis_type[xyaxes[0]], fontsize=fs or fs_axis)
plt.ylabel(axis_type[xyaxes[1]], fontsize=fs or fs_axis)
plt.title(title, fontsize=fs or fs_title)
fig and fig.axes[0].tick_params(labelsize=fs or fs_axis)
plt.xlim(*xlim)
plt.ylim(*ylim)
else:
status += ", unrolling " + axis_type[unroll]
nrow, ncol, width = radiopadre.file.compute_thumb_geometry(dims[unroll],
ncol, mincol,
maxcol, width,
maxwidth)
plt.figure(figsize=(width * ncol, width * nrow), dpi=radiopadre.DPI)
plt.suptitle(self.basename)
for iplot in range(dims[unroll]):
ax = plt.subplot(nrow, ncol, iplot + 1)
ax.tick_params(labelsize=fs or fs_axis)
baseslice[unroll] = iplot
plt.imshow(data[tuple(baseslice)].T, vmin=vmin, vmax=vmax,
cmap=cmap)
plt.title(title + " " + axis_labels[unroll][iplot],
fontsize=fs or fs_title)
plt.xlabel(axis_type[xyaxes[0]], fontsize=fs or fs_axis)
plt.ylabel(axis_type[xyaxes[1]], fontsize=fs or fs_axis)
if colorbar:
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fs or fs_colorbar)
plt.xlim(*xlim)
plt.ylim(*ylim)
return status
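# Worked illustration of the linear axis-label arithmetic used in show():
# the world value at (1-based) pixel i along axis n is
# CRVALn + CDELTn * (i - CRPIXn). The header values below are made up.
def _demo_axis_values():
    rval, rpix, delt = 1.4e9, 1.0, 1.0e6   # e.g. a FREQ axis in Hz
    return [rval + delt * (i - rpix) for i in range(1, 4)]
    # -> [1.4e9, 1.401e9, 1.402e9] for the first three planes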
|
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from os.path import (
abspath,
dirname,
join,
isfile
)
import warnings
from datetime import datetime
import pandas as pd
import numpy as np
import pandas.io.data as web
from pandas.tseries.offsets import BDay
import zipfile
from io import BytesIO, StringIO
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
from . import pos
from . import txn
APPROX_BDAYS_PER_MONTH = 21
APPROX_BDAYS_PER_YEAR = 252
def pyfolio_root():
return dirname(abspath(__file__))
def data_path(name):
return join(pyfolio_root(), 'data', name)
def one_dec_places(x, pos):
"""
Adds 1/10th decimal to plot ticks.
"""
return '%.1f' % x
def percentage(x, pos):
"""
Adds percentage sign to plot ticks.
"""
return '%.0f%%' % x
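# Sketch of how these helpers are meant to be used (assumes matplotlib is
# available): both follow the (value, tick_position) signature expected by
# matplotlib's FuncFormatter, so they can be attached to an axis directly.
def _demo_tick_formatters():
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FuncFormatter
    fig, ax = plt.subplots()
    ax.xaxis.set_major_formatter(FuncFormatter(one_dec_places))
    ax.yaxis.set_major_formatter(FuncFormatter(percentage))
    return fig, ax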
def round_two_dec_places(x):
"""
Rounds a number to 1/100th decimal.
"""
return np.round(x, 2)
def get_utc_timestamp(dt):
"""
Returns the Timestamp/DatetimeIndex
either localized to or converted to UTC.
Parameters
----------
dt : Timestamp/DatetimeIndex
the date(s) to be converted
Returns
-------
same type as input
date(s) converted to UTC
"""
dt = pd.to_datetime(dt)
try:
dt = dt.tz_localize('UTC')
except TypeError:
dt = dt.tz_convert('UTC')
return dt
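# Quick usage sketch: a naive date string gets localized to UTC, while an
# already tz-aware timestamp is converted rather than re-localized.
def _demo_get_utc_timestamp():
    naive = get_utc_timestamp('2015-01-02')
    aware = get_utc_timestamp(pd.Timestamp('2015-01-02', tz='US/Eastern'))
    return naive, aware   # both come back as UTC timestamps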
def get_returns_cached(filepath, update_func, latest_dt, **kwargs):
"""Get returns from a cached file if the cache is recent enough,
otherwise, try to retrieve via a provided update function and
update the cache file.
Parameters
----------
filepath : str
Path to cached csv file
update_func : function
Function to call in case cache is not up-to-date.
latest_dt : pd.Timestamp (tz=UTC)
Latest datetime required in csv file.
**kwargs : Keyword arguments
Optional keyword arguments will be passed to update_func()
Returns
-------
pandas.DataFrame
DataFrame containing returns
"""
update_cache = False
if not isfile(filepath):
update_cache = True
else:
returns = pd.read_csv(filepath, index_col=0,
parse_dates=True)
returns.index = returns.index.tz_localize("UTC")
if returns.index[-1] < latest_dt:
update_cache = True
if update_cache:
returns = update_func(**kwargs)
try:
returns.to_csv(filepath)
except IOError as e:
warnings.warn('Could not update cache {}. '
'Exception: {}'.format(filepath, e),
UserWarning)
return returns
def get_symbol_from_yahoo(symbol, start=None, end=None):
"""Wrapper for pandas.io.data.get_data_yahoo().
Retrieves prices for symbol from yahoo and computes returns
based on adjusted closing prices.
Parameters
----------
symbol : str
Symbol name to load, e.g. 'SPY'
start : pandas.Timestamp compatible, optional
Start date of time period to retrieve
end : pandas.Timestamp compatible, optional
End date of time period to retrieve
Returns
-------
pandas.DataFrame
Returns of symbol in requested period.
"""
px = web.get_data_yahoo(symbol, start=start, end=end)
rets = px[['Adj Close']].pct_change().dropna()
rets.index = rets.index.tz_localize("UTC")
rets.columns = [symbol]
return rets
def default_returns_func(symbol, start=None, end=None):
"""
Gets returns for a symbol.
Queries Yahoo Finance. Attempts to cache SPY.
Parameters
----------
symbol : str
Ticker symbol, e.g. 'AAPL'.
start : date, optional
Earliest date to fetch data for.
Defaults to earliest date available.
end : date, optional
Latest date to fetch data for.
Defaults to latest date available.
Returns
-------
pd.Series
Daily returns for the symbol.
- See full explanation in tears.create_full_tear_sheet (returns).
"""
if start is None:
start = '1/1/1970'
if end is None:
end = pd.Timestamp(datetime.today()).normalize() - BDay()
start = get_utc_timestamp(start)
end = get_utc_timestamp(end)
if symbol == 'SPY':
filepath = data_path('spy.csv')
rets = get_returns_cached(filepath,
get_symbol_from_yahoo,
end,
symbol='SPY',
start='1/1/1970',
end=datetime.now())
rets = rets[start:end]
else:
rets = get_symbol_from_yahoo(symbol, start=start, end=end)
return rets[symbol]
def vectorize(func):
"""Decorator so that functions can be written to work on Series but
may still be called with DataFrames.
"""
def wrapper(df, *args, **kwargs):
if df.ndim == 1:
return func(df, *args, **kwargs)
elif df.ndim == 2:
return df.apply(func, *args, **kwargs)
return wrapper
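# Minimal illustration (hypothetical function name): decorated with @vectorize,
# a function written against a Series also works on a DataFrame, where it is
# applied column by column via DataFrame.apply.
@vectorize
def _demo_cum_returns(returns):
    return (1 + returns).cumprod() - 1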
def get_fama_french():
"""Retrieve Fama-French factors from dartmouth host.
Returns
-------
pandas.DataFrame
Percent change of Fama-French factors
"""
umd_req = urlopen('http://mba.tuck.dartmouth.edu/page'
's/faculty/ken.french/ftp/F-F_Momentum'
'_Factor_daily_CSV.zip')
factors_req = urlopen('http://mba.tuck.dartmouth.edu/pag'
'es/faculty/ken.french/ftp/F-F_Re'
'search_Data_Factors_daily_CSV.zip')
umd_zip = zipfile.ZipFile(BytesIO(umd_req.read()), 'r')
factors_zip = zipfile.ZipFile(BytesIO(factors_req.read()),
'r')
umd_csv = umd_zip.read('F-F_Momentum_Factor_daily.CSV')
umd_csv = umd_csv.decode('utf-8')
umd_csv = umd_csv.split('\r\n\r\n')[2]\
.replace('\r\n', '\n')
factors_csv = factors_zip.read('F-F_Research_Data_'
'Factors_daily.CSV')
factors_csv = factors_csv.decode('utf-8')
factors_csv = factors_csv.split('\r\n\r\n')[1]\
.replace('\r\n', '\n')
factors = pd.DataFrame.from_csv(StringIO(factors_csv), sep=',')
umd = pd.DataFrame.from_csv(StringIO(umd_csv), sep=',')
five_factors = factors.join(umd).dropna(axis=0)
five_factors = five_factors / 100
return five_factors
def load_portfolio_risk_factors(filepath_prefix=None, start=None, end=None):
"""
Loads risk factors Mkt-Rf, SMB, HML, Rf, and UMD.
Data is cached in a local CSV file and re-downloaded from
Dartmouth when the cache does not cover the requested end date.
Returns
-------
five_factors : pd.DataFrame
Risk factors timeseries.
"""
if start is None:
start = '1/1/1970'
if end is None:
end = pd.Timestamp(datetime.today()).normalize() - BDay()
start = get_utc_timestamp(start)
end = get_utc_timestamp(end)
if filepath_prefix is None:
filepath = data_path('factors.csv')
else:
filepath = filepath_prefix
five_factors = get_returns_cached(filepath, get_fama_french, end)
return five_factors.loc[start:end]
def extract_rets_pos_txn_from_zipline(backtest):
"""Extract returns, positions, transactions and leverage from the
backtest data structure returned by zipline.TradingAlgorithm.run().
The returned data structures are in a format compatible with the
rest of pyfolio and can be directly passed to
e.g. tears.create_full_tear_sheet().
Parameters
----------
backtest : pd.DataFrame
DataFrame returned by zipline.TradingAlgorithm.run()
Returns
-------
returns : pd.Series
Daily returns of strategy.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
gross_lev : pd.Series, optional
The leverage of a strategy.
- See full explanation in tears.create_full_tear_sheet.
Example (on the Quantopian research platform)
---------------------------------------------
>>> backtest = my_algo.run()
>>> returns, positions, transactions, gross_lev =
>>> pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
>>> pyfolio.tears.create_full_tear_sheet(returns,
>>> positions, transactions, gross_lev=gross_lev)
"""
backtest.index = backtest.index.normalize()
if backtest.index.tzinfo is None:
backtest.index = backtest.index.tz_localize('UTC')
returns = backtest.returns
gross_lev = backtest.gross_leverage
raw_positions = []
for dt, pos_row in backtest.positions.iteritems():
df = pd.DataFrame(pos_row)
df.index = [dt] * len(df)
raw_positions.append(df)
positions = pd.concat(raw_positions)
positions = pos.extract_pos(positions, backtest.ending_cash)
transactions_frame = txn.make_transaction_frame(backtest.transactions)
transactions = txn.get_txn_vol(transactions_frame)
transactions.index = transactions.index.normalize()
return returns, positions, transactions, gross_lev
# Settings dict to store functions/values that may
# need to be overridden depending on the users environment
SETTINGS = {
'returns_func': default_returns_func
}
def register_return_func(func):
"""
Registers the 'returns_func' that will be called for
retrieving returns data.
Parameters
----------
func : function
A function that returns a pandas Series of asset returns.
The signature of the function must be as follows
>>> func(symbol)
Where symbol is an asset identifier
Returns
-------
None
"""
SETTINGS['returns_func'] = func
def get_symbol_rets(symbol, start=None, end=None):
"""
Calls the currently registered 'returns_func'
Parameters
----------
symbol : object
An identifier for the asset whose return
series is desired.
e.g. ticker symbol or database ID
start : date, optional
Earliest date to fetch data for.
Defaults to earliest date available.
end : date, optional
Latest date to fetch data for.
Defaults to latest date available.
Returns
-------
pandas.Series
Returned by the current 'returns_func'
"""
return SETTINGS['returns_func'](symbol,
start=start,
end=end)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Everything needed to run classification and regression tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow.compat.v1 as tf
from bam.bert import tokenization
from bam.data import feature_spec
from bam.data import task_weighting
from bam.helpers import utils
from bam.task_specific import task
from bam.task_specific.classification import classification_metrics
class InputExample(task.Example):
"""A single training/test example for simple sequence classification."""
def __init__(self, eid, task_name, text_a, text_b=None, label=None):
super(InputExample, self).__init__(task_name)
self.eid = eid
self.text_a = text_a
self.text_b = text_b
self.label = label
class SingleOutputTask(task.Task):
"""A task with a single label per input (e.g., text classification)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer):
super(SingleOutputTask, self).__init__(config, name)
self._tokenizer = tokenizer
self._distill_inputs = None
def featurize(self, example, is_training):
"""Turn an InputExample into a dict of features."""
if is_training and self.config.distill and self._distill_inputs is None:
self._distill_inputs = utils.load_pickle(
self.config.distill_inputs(self.name))
tokens_a = self._tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = self._tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, self.config.max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > self.config.max_seq_length - 2:
tokens_a = tokens_a[0:(self.config.max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it
# makes it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = self._tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.config.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.config.max_seq_length
assert len(input_mask) == self.config.max_seq_length
assert len(segment_ids) == self.config.max_seq_length
eid = example.eid
features = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"task_id": self.config.task_names.index(self.name),
self.name + "_eid": eid,
}
self._add_features(features, example,
None if self._distill_inputs is None else
self._distill_inputs[eid])
return features
def _load_glue(self, lines, split, text_a_loc, text_b_loc, label_loc,
skip_first_line=False, eid_offset=0, swap=False):
examples = []
for (i, line) in enumerate(lines):
if i == 0 and skip_first_line:
continue
eid = i - (1 if skip_first_line else 0) + eid_offset
text_a = tokenization.convert_to_unicode(line[text_a_loc])
if text_b_loc is None:
text_b = None
else:
text_b = tokenization.convert_to_unicode(line[text_b_loc])
if "test" in split or "diagnostic" in split:
label = self._get_dummy_label()
else:
label = tokenization.convert_to_unicode(line[label_loc])
if swap:
text_a, text_b = text_b, text_a
examples.append(InputExample(eid=eid, task_name=self.name,
text_a=text_a, text_b=text_b, label=label))
return examples
@abc.abstractmethod
def _get_dummy_label(self):
pass
@abc.abstractmethod
def _add_features(self, features, example, distill_inputs):
pass
class RegressionTask(SingleOutputTask):
"""A regression task (e.g., STS)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer,
min_value, max_value):
super(RegressionTask, self).__init__(config, name, tokenizer)
self._tokenizer = tokenizer
self._min_value = min_value
self._max_value = max_value
def _get_dummy_label(self):
return 0.0
def get_feature_specs(self):
feature_specs = [feature_spec.FeatureSpec(self.name + "_eid", []),
feature_spec.FeatureSpec(self.name + "_targets", [],
is_int_feature=False)]
if self.config.distill:
feature_specs.append(feature_spec.FeatureSpec(
self.name + "_distill_targets", [], is_int_feature=False))
return feature_specs
def _add_features(self, features, example, distill_inputs):
label = float(example.label)
assert self._min_value <= label <= self._max_value
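    # Scale the gold score to roughly [0, 1]; for STS (min_value=0.0,
    # max_value=5.0) this amounts to dividing the label by 5.0.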
label = (label - self._min_value) / self._max_value
features[example.task_name + "_targets"] = label
if distill_inputs is not None:
features[self.name + "_distill_targets"] = distill_inputs
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
reprs = bert_model.get_pooled_output()
if is_training:
reprs = tf.nn.dropout(reprs, keep_prob=0.9)
predictions = tf.layers.dense(reprs, 1)
predictions = tf.squeeze(predictions, -1)
targets = features[self.name + "_targets"]
if self.config.distill:
distill_targets = features[self.name + "_distill_targets"]
if self.config.teacher_annealing:
targets = ((targets * percent_done) +
(distill_targets * (1 - percent_done)))
else:
targets = ((targets * (1 - self.config.distill_weight)) +
(distill_targets * self.config.distill_weight))
losses = tf.square(predictions - targets)
outputs = dict(
loss=losses,
predictions=predictions,
targets=features[self.name + "_targets"],
eid=features[self.name + "_eid"]
)
return losses, outputs
def get_scorer(self):
return classification_metrics.RegressionScorer()
class ClassificationTask(SingleOutputTask):
"""A classification task (e.g., MNLI)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer,
label_list):
super(ClassificationTask, self).__init__(config, name, tokenizer)
self._tokenizer = tokenizer
self._label_list = label_list
def _get_dummy_label(self):
return self._label_list[0]
def get_feature_specs(self):
feature_specs = [feature_spec.FeatureSpec(self.name + "_eid", []),
feature_spec.FeatureSpec(self.name + "_label_ids", [])]
if self.config.distill:
feature_specs.append(feature_spec.FeatureSpec(
self.name + "_logits", [len(self._label_list)], is_int_feature=False))
return feature_specs
def _add_features(self, features, example, distill_inputs):
label_map = {}
for (i, label) in enumerate(self._label_list):
label_map[label] = i
label_id = label_map[example.label]
features[example.task_name + "_label_ids"] = label_id
if distill_inputs is not None:
features[self.name + "_logits"] = distill_inputs
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
num_labels = len(self._label_list)
reprs = bert_model.get_pooled_output()
if is_training:
reprs = tf.nn.dropout(reprs, keep_prob=0.9)
logits = tf.layers.dense(reprs, num_labels)
# probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = features[self.name + "_label_ids"]
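    # With distillation enabled, the target distribution mixes the one-hot gold
    # labels with softened teacher logits; teacher_annealing shifts that mix
    # from the teacher towards the gold labels as percent_done goes from 0 to 1.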
if self.config.distill:
teacher_labels = tf.nn.softmax(features[self.name + "_logits"] / 1.0)
true_labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
if self.config.teacher_annealing:
labels = ((true_labels * percent_done) +
(teacher_labels * (1 - percent_done)))
else:
labels = ((true_labels * (1 - self.config.distill_weight)) +
(teacher_labels * self.config.distill_weight))
else:
labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
losses = -tf.reduce_sum(labels * log_probs, axis=-1)
outputs = dict(
loss=losses,
logits=logits,
predictions=tf.argmax(logits, axis=-1),
label_ids=label_ids,
eid=features[self.name + "_eid"],
)
return losses, outputs
def get_scorer(self):
return classification_metrics.AccuracyScorer()
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
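# Illustrative example (not part of the original module): with
# tokens_a = ["a", "b", "c", "d"], tokens_b = ["x", "y"] and max_length = 4,
# _truncate_seq_pair pops from the longer list first, leaving
# tokens_a == ["a", "b"] and tokens_b == ["x", "y"].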
class MNLI(ClassificationTask):
"""Multi-NLI."""
def __init__(self, config, tokenizer):
super(MNLI, self).__init__(config, "mnli", tokenizer,
["contradiction", "entailment", "neutral"])
def get_examples(self, split):
if split == "dev":
split += "_matched"
return self.load_data(split + ".tsv", split)
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
if split == "diagnostic":
examples += self._load_glue(lines, split, 1, 2, None, True)
else:
examples += self._load_glue(lines, split, 8, 9, -1, True)
return examples
def get_test_splits(self):
return ["test_matched", "test_mismatched", "diagnostic"]
class MRPC(ClassificationTask):
"""Microsoft Research Paraphrase Corpus."""
def __init__(self, config, tokenizer):
super(MRPC, self).__init__(config, "mrpc", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
offset = 0
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 3, 4, 0, True)
if not offset:
offset = len(examples)
if self.config.double_unordered and split == "train":
examples += self._load_glue(lines, split, 3, 4, 0, True, offset, True)
return examples
class CoLA(ClassificationTask):
"""Corpus of Linguistic Acceptability."""
def __init__(self, config, tokenizer):
super(CoLA, self).__init__(config, "cola", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(
lines, split, 1 if split == "test" else 3, None, 1, split == "test")
return examples
def get_scorer(self):
return classification_metrics.MCCScorer()
class SST(ClassificationTask):
"""Stanford Sentiment Treebank."""
def __init__(self, config, tokenizer):
super(SST, self).__init__(config, "sst", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
if "test" in split:
examples += self._load_glue(lines, split, 1, None, None, True)
else:
examples += self._load_glue(lines, split, 0, None, 1, True)
return examples
class QQP(ClassificationTask):
"""Quora Question Pair."""
def __init__(self, config, tokenizer):
super(QQP, self).__init__(config, "qqp", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 1 if split == "test" else 3,
2 if split == "test" else 4, 5, True)
return examples
class RTE(ClassificationTask):
"""Recognizing Textual Entailment."""
def __init__(self, config, tokenizer):
super(RTE, self).__init__(config, "rte", tokenizer,
["entailment", "not_entailment"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 1, 2, 3, True)
return examples
class QNLI(ClassificationTask):
"""Question NLI."""
def __init__(self, config, tokenizer):
super(QNLI, self).__init__(config, "qnli", tokenizer,
["entailment", "not_entailment"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 1, 2, 3, True)
return examples
class TREC(ClassificationTask):
"""Question Type Classification."""
def __init__(self, config, tokenizer):
super(TREC, self).__init__(config, "trec", tokenizer,
["num", "loc", "hum", "desc", "enty", "abbr"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 0, None, 1, False)
return examples
class STS(RegressionTask):
"""Semantic Textual Similarity."""
def __init__(self, config, tokenizer):
super(STS, self).__init__(config, "sts", tokenizer, 0.0, 5.0)
def _create_examples(self, lines, split):
examples = []
offset = 0
for _ in range(task_weighting.get_task_multiple(self, split)):
if split == "test":
examples += self._load_glue(lines, split, -2, -1, None, True)
else:
examples += self._load_glue(lines, split, -3, -2, -1, True)
if not offset:
offset = len(examples)
if self.config.double_unordered and split == "train":
examples += self._load_glue(
lines, split, -3, -2, -1, True, offset, True)
return examples
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import datetime as dt
# External imports
import mock
# Bokeh imports
from bokeh.core.validation import check_integrity
from .utils.property_utils import check_properties_existence
# Module under test
from bokeh.models import Range1d, DataRange1d, FactorRange
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_Range1d(object):
def test_basic(self):
r = Range1d()
check_properties_existence(r, [
"callback",
"start",
"end",
"reset_start",
"reset_end",
"bounds",
"min_interval",
"max_interval"],
)
def test_init_with_timedelta(self):
range1d = Range1d(start=-dt.timedelta(seconds=5), end=dt.timedelta(seconds=3))
assert range1d.start == -dt.timedelta(seconds=5)
assert range1d.end == dt.timedelta(seconds=3)
assert range1d.bounds is None
def test_init_with_datetime(self):
range1d = Range1d(start=dt.datetime(2016, 4, 28, 2, 20, 50), end=dt.datetime(2017, 4, 28, 2, 20, 50))
assert range1d.start == dt.datetime(2016, 4, 28, 2, 20, 50)
assert range1d.end == dt.datetime(2017, 4, 28, 2, 20, 50)
assert range1d.bounds is None
def test_init_with_float(self):
range1d = Range1d(start=-1.0, end=3.0)
assert range1d.start == -1.0
assert range1d.end == 3.0
assert range1d.bounds is None
def test_init_with_int(self):
range1d = Range1d(start=-1, end=3)
assert range1d.start == -1
assert range1d.end == 3
assert range1d.bounds is None
def test_init_with_positional_arguments(self):
range1d = Range1d(1, 2)
assert range1d.start == 1
assert range1d.end == 2
assert range1d.bounds is None
def test_init_with_keyword_arguments(self):
range1d = Range1d(start=1, end=2)
assert range1d.start == 1
assert range1d.end == 2
assert range1d.bounds is None
def test_cannot_initialize_with_both_keyword_and_positional_arguments(self):
with pytest.raises(ValueError):
Range1d(1, 2, start=1, end=2)
def test_cannot_initialize_with_three_positional_arguments(self):
with pytest.raises(ValueError):
Range1d(1, 2, 3)
def test_with_max_bound_smaller_than_min_bounded_raises_valueerror(self):
with pytest.raises(ValueError):
Range1d(1, 2, bounds=(1, 0))
with pytest.raises(ValueError):
Range1d(1, 2, bounds=[1, 0])
def test_bounds_with_text_rejected_as_the_correct_value_error(self):
with pytest.raises(ValueError) as e:
Range1d(1, 2, bounds="21") # The string is indexable, so this may not fail properly
assert e.value.args[0].startswith('expected an element of either')
def test_bounds_with_three_item_tuple_raises_valueerror(self):
with pytest.raises(ValueError):
Range1d(1, 2, bounds=(0, 1, 2))
class Test_DataRange1d(object):
def test_basic(self):
r = DataRange1d()
check_properties_existence(r, [
"callback",
"names",
"renderers",
"range_padding",
"range_padding_units",
"flipped",
"follow",
"follow_interval",
"default_span",
"start",
"end",
"bounds",
"min_interval",
"max_interval"],
)
def test_init_with_no_arguments(self):
datarange1d = DataRange1d()
assert datarange1d.start is None
assert datarange1d.end is None
assert datarange1d.bounds is None
def test_init_with_timedelta(self):
datarange1d = DataRange1d(start=-dt.timedelta(seconds=5), end=dt.timedelta(seconds=3))
assert datarange1d.start == -dt.timedelta(seconds=5)
assert datarange1d.end == dt.timedelta(seconds=3)
assert datarange1d.bounds is None
def test_init_with_datetime(self):
datarange1d = DataRange1d(start=dt.datetime(2016, 4, 28, 2, 20, 50), end=dt.datetime(2017, 4, 28, 2, 20, 50))
assert datarange1d.start == dt.datetime(2016, 4, 28, 2, 20, 50)
assert datarange1d.end == dt.datetime(2017, 4, 28, 2, 20, 50)
assert datarange1d.bounds is None
def test_init_with_float(self):
datarange1d = DataRange1d(start=-1.0, end=3.0)
assert datarange1d.start == -1.0
assert datarange1d.end == 3.0
assert datarange1d.bounds is None
def test_init_with_int(self):
datarange1d = DataRange1d(start=-1, end=3)
assert datarange1d.start == -1
assert datarange1d.end == 3
assert datarange1d.bounds is None
def test_init_with_follow_sets_bounds_to_none(self):
datarange1d = DataRange1d(follow="start")
assert datarange1d.follow == "start"
assert datarange1d.bounds is None
def test_init_with_bad_bounds(self):
with pytest.raises(ValueError):
DataRange1d(1, 2, bounds=(1, 0))
with pytest.raises(ValueError):
DataRange1d(1, 2, bounds=[1, 0])
with pytest.raises(ValueError):
            DataRange1d(1, 2, bounds="21")
class Test_FactorRange(object):
def test_basic(self):
r = FactorRange()
check_properties_existence(r, [
"callback",
"factors",
"factor_padding",
"group_padding",
"subgroup_padding",
"range_padding",
"range_padding_units",
"start",
"end",
"bounds",
"min_interval",
"max_interval"],
)
    def test_init_defaults(self):
factor_range = FactorRange("a", "b")
assert factor_range.factors == ["a", "b"]
assert factor_range.range_padding == 0
assert factor_range.range_padding_units == "percent"
assert factor_range.factor_padding == 0
assert factor_range.group_padding == 1.4
assert factor_range.subgroup_padding == 0.8
        assert factor_range.bounds is None
        assert factor_range.min_interval is None
        assert factor_range.max_interval is None
def test_init_with_positional_arguments(self):
factor_range = FactorRange("a", "b")
assert factor_range.factors == ["a", "b"]
factor_range = FactorRange(["a", "x"], ["b", "y"])
assert factor_range.factors == [["a", "x"], ["b", "y"]]
factor_range = FactorRange(["a", "x", "1'"], ["b", "y", "2"])
assert factor_range.factors == [["a", "x", "1'"], ["b", "y", "2"]]
def test_init_with_keyword_arguments(self):
factor_range = FactorRange(factors=["a", "b", "c", "d", "e"])
assert factor_range.factors == ["a", "b", "c", "d", "e"]
def test_cannot_initialize_with_both_keyword_and_positional_arguments(self):
with pytest.raises(ValueError):
FactorRange(["a", "b", "c"], factors=["a", "b", "c"])
def test_duplicate_factors_raises_validation_error(self):
r = FactorRange("foo", "bar", "foo")
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([r])
assert mock_logger.error.call_count == 1
r = FactorRange(factors=[("foo", "a"), ("foo", "b"), ("foo", "a")])
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([r])
assert mock_logger.error.call_count == 1
r = FactorRange(factors=[("foo", "a", "1"), ("foo", "a", "2"), ("foo", "a", "1")])
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([r])
assert mock_logger.error.call_count == 1
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import getpass
import tempfile
from copy import copy
from resource_management.libraries.functions.version import compare_versions
from resource_management import *
from resource_management.core import shell
def setup_users():
"""
Creates users before cluster installation
"""
import params
should_create_users_and_groups = False
if params.host_sys_prepped:
should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
else:
should_create_users_and_groups = not params.ignore_groupsusers_create
if should_create_users_and_groups:
for group in params.group_list:
Group(group,
)
for user in params.user_list:
User(user,
uid = get_uid(user) if params.override_uid == "true" else None,
gid = params.user_to_gid_dict[user],
groups = params.user_to_groups_dict[user],
fetch_nonlocal_groups = params.fetch_nonlocal_groups,
)
if params.override_uid == "true":
set_uid(params.smoke_user, params.smoke_user_dirs)
else:
Logger.info('Skipping setting uid for smoke user as host is sys prepped')
else:
Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
pass
if params.has_hbase_masters:
Directory (params.hbase_tmp_dir,
owner = params.hbase_user,
mode=0775,
create_parents = True,
cd_access="a",
)
if params.override_uid == "true":
set_uid(params.hbase_user, params.hbase_user_dirs)
else:
Logger.info('Skipping setting uid for hbase user as host is sys prepped')
if should_create_users_and_groups:
if params.has_namenode:
create_dfs_cluster_admins()
if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
create_tez_am_view_acls()
else:
Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
def create_dfs_cluster_admins():
"""
  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
"""
import params
groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
User(params.hdfs_user,
groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
fetch_nonlocal_groups = params.fetch_nonlocal_groups
)
def create_tez_am_view_acls():
"""
  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
"""
import params
if not params.tez_am_view_acls.startswith("*"):
create_users_and_groups(params.tez_am_view_acls)
def create_users_and_groups(user_and_groups):
import params
  parts = re.split(r'\s+', user_and_groups)
if len(parts) == 1:
parts.append("")
users_list = parts[0].split(",") if parts[0] else []
groups_list = parts[1].split(",") if parts[1] else []
# skip creating groups and users if * is provided as value.
  users_list = filter(lambda x: x != '*', users_list)
  groups_list = filter(lambda x: x != '*', groups_list)
if users_list:
User(users_list,
fetch_nonlocal_groups = params.fetch_nonlocal_groups
)
if groups_list:
Group(copy(groups_list),
)
return groups_list
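# Illustrative example (not part of the original module): for
# user_and_groups = "hdfs,yarn hadoop,users" the parsing above yields
# users_list = ["hdfs", "yarn"] and groups_list = ["hadoop", "users"];
# a literal "*" entry is filtered out so nothing is created for it.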
def set_uid(user, user_dirs):
"""
user_dirs - comma separated directories
"""
import params
File(format("{tmp_dir}/changeUid.sh"),
content=StaticFile("changeToSecureUid.sh"),
mode=0555)
ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
uid = get_uid(user, return_existing=True)
Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}", new_uid=0 if uid is None else uid),
not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
def get_uid(user, return_existing=False):
"""
  Tries to get the UID for a username. It first looks for a *<user>_uid* property in the cluster configurations and,
  if *return_existing=True*, falls back to the UID of the existing *user* on the host.
:param user: username to get UID for
:param return_existing: return UID for existing user
:return:
"""
import params
user_str = str(user) + "_uid"
service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
if service_env and params.config['configurations'][service_env[0]][user_str]:
service_env_str = str(service_env[0])
uid = params.config['configurations'][service_env_str][user_str]
if len(service_env) > 1:
Logger.warning("Multiple values found for %s, using %s" % (user_str, uid))
return uid
else:
if return_existing:
# pick up existing UID or try to find available UID in /etc/passwd, see changeToSecureUid.sh for more info
if user == params.smoke_user:
return None
File(format("{tmp_dir}/changeUid.sh"),
content=StaticFile("changeToSecureUid.sh"),
mode=0555)
code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}"))
return int(newUid)
else:
# do not return UID for existing user, used in User resource call to let OS to choose UID for us
return None
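# Illustrative example (hypothetical configuration): if some *-env configuration
# contains {"hdfs_uid": "1013"}, get_uid("hdfs") returns "1013". Without such a
# property and with return_existing=True, changeToSecureUid.sh is invoked to look
# up or allocate a UID for the existing user (the smoke user is skipped).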
def setup_hadoop_env():
import params
stackversion = params.stack_version_unformatted
Logger.info("FS Type: {0}".format(params.dfs_type))
if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
if params.security_enabled:
tc_owner = "root"
else:
tc_owner = params.hdfs_user
# create /etc/hadoop
Directory(params.hadoop_dir, mode=0755)
# write out hadoop-env.sh, but only if the directory exists
if os.path.exists(params.hadoop_conf_dir):
File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
group=params.user_group,
content=InlineTemplate(params.hadoop_env_sh_template))
# Create tmp dir for java.io.tmpdir
# Handle a situation when /tmp is set to noexec
Directory(params.hadoop_java_io_tmpdir,
owner=params.hdfs_user,
group=params.user_group,
mode=01777
)
def setup_java():
"""
  Installs the JDK using parameters that come from ambari-server.
"""
import params
java_exec = format("{java_home}/bin/java")
if not os.path.isfile(java_exec):
if not params.jdk_name: # if custom jdk is used.
raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
jdk_curl_target = format("{tmp_dir}/{jdk_name}")
java_dir = os.path.dirname(params.java_home)
Directory(params.artifact_dir,
create_parents = True,
)
File(jdk_curl_target,
content = DownloadSource(format("{jdk_location}/{jdk_name}")),
not_if = format("test -f {jdk_curl_target}")
)
File(jdk_curl_target,
mode = 0755,
)
tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
try:
if params.jdk_name.endswith(".bin"):
chmod_cmd = ("chmod", "+x", jdk_curl_target)
install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
elif params.jdk_name.endswith(".gz"):
chmod_cmd = ("chmod","a+x", java_dir)
install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
Directory(java_dir
)
Execute(chmod_cmd,
sudo = True,
)
Execute(install_cmd,
)
finally:
Directory(tmp_java_dir, action="delete")
File(format("{java_home}/bin/java"),
mode=0755,
cd_access="a",
)
Execute(('chmod', '-R', '755', params.java_home),
sudo = True,
)
|
|
import asyncio
import logging
import os
import aiohttp.web
from aiohttp.web_exceptions import HTTPBadRequest, HTTPConflict, HTTPNoContent
from aiohttp.web_fileresponse import FileResponse
from sqlalchemy import exc, select
from sqlalchemy.ext.asyncio import AsyncSession
import virtool.jobs.db
import virtool.pg.utils
import virtool.subtractions.db
import virtool.uploads.db
import virtool.validators
from virtool.api.response import NotFound, json_response
from virtool.api.utils import compose_regex_query, get_query_bool, paginate
from virtool.db.transforms import apply_transforms
from virtool.db.utils import get_new_id
from virtool.http.routes import Routes
from virtool.http.schema import schema
from virtool.jobs.utils import JobRights
from virtool.subtractions.db import PROJECTION, attach_computed
from virtool.subtractions.files import create_subtraction_file, delete_subtraction_file
from virtool.subtractions.models import SubtractionFile
from virtool.subtractions.utils import FILES
from virtool.uploads.models import Upload
from virtool.uploads.utils import naive_writer
from virtool.users.db import AttachUserTransform
from virtool.utils import base_processor
logger = logging.getLogger("subtractions")
routes = Routes()
BASE_QUERY = {"deleted": False}
@routes.get("/subtractions")
async def find(req):
db = req.app["db"]
ready = get_query_bool(req, "ready")
short = get_query_bool(req, "short")
term = req.query.get("find")
db_query = dict()
if term:
db_query = compose_regex_query(term, ["name", "nickname"])
if ready:
db_query["ready"] = True
if short:
documents = list()
async for document in db.subtraction.find(
{**db_query, **BASE_QUERY}, ["name", "ready"]
).sort("name"):
documents.append(base_processor(document))
return json_response(documents)
data = await paginate(
db.subtraction,
db_query,
req.query,
base_query=BASE_QUERY,
sort="name",
projection=PROJECTION,
)
documents, ready_count = await asyncio.gather(
apply_transforms(
data["documents"], [AttachUserTransform(db, ignore_errors=True)]
),
db.subtraction.count_documents({"ready": True}),
)
return json_response({**data, "documents": documents, "ready_count": ready_count})
@routes.get("/subtractions/{subtraction_id}")
@routes.jobs_api.get("/subtractions/{subtraction_id}")
async def get(req):
"""
    Get a complete subtraction document.
"""
db = req.app["db"]
subtraction_id = req.match_info["subtraction_id"]
document = await db.subtraction.find_one(subtraction_id)
if not document:
raise NotFound()
document = await attach_computed(req.app, document)
return json_response(
await apply_transforms(
base_processor(document), [AttachUserTransform(db, ignore_errors=True)]
)
)
@routes.post("/subtractions", permission="modify_subtraction")
@schema(
{
"name": {
"type": "string",
"coerce": virtool.validators.strip,
"empty": False,
"required": True,
},
"nickname": {
"type": "string",
"coerce": virtool.validators.strip,
"default": "",
},
"upload_id": {"type": "integer", "required": True},
}
)
async def create(req):
"""
    Add a new subtraction. Starts a :class:`.CreateSubtraction` job process.
"""
db = req.app["db"]
pg = req.app["pg"]
data = req["data"]
name = data["name"]
nickname = data["nickname"]
upload_id = data["upload_id"]
upload_record = await virtool.pg.utils.get_row_by_id(pg, Upload, upload_id)
if upload_record is None:
raise HTTPBadRequest(text="File does not exist")
filename = upload_record.name
user_id = req["client"].user_id
document = await virtool.subtractions.db.create(
db, user_id, filename, name, nickname, upload_id
)
subtraction_id = document["_id"]
task_args = {
"subtraction_id": subtraction_id,
"files": [{"id": upload_id, "name": filename}],
}
rights = JobRights()
rights.subtractions.can_read(subtraction_id)
rights.subtractions.can_modify(subtraction_id)
rights.subtractions.can_remove(subtraction_id)
rights.uploads.can_read(upload_id)
job_id = await get_new_id(db.jobs)
await virtool.jobs.db.create(
db, "create_subtraction", task_args, user_id, rights, job_id=job_id
)
await req.app["jobs"].enqueue(job_id)
headers = {"Location": f"/subtraction/{subtraction_id}"}
document = await attach_computed(req.app, document)
document = await apply_transforms(document, [AttachUserTransform(db)])
return json_response(base_processor(document), headers=headers, status=201)
@routes.jobs_api.put("/subtractions/{subtraction_id}/files/{filename}")
async def upload(req):
"""Upload a new subtraction file."""
db = req.app["db"]
pg = req.app["pg"]
subtraction_id = req.match_info["subtraction_id"]
filename = req.match_info["filename"]
document = await db.subtraction.find_one(subtraction_id)
if document is None:
raise NotFound()
if filename not in FILES:
raise NotFound("Unsupported subtraction file name")
file_type = virtool.subtractions.utils.check_subtraction_file_type(filename)
try:
subtraction_file = await create_subtraction_file(
pg, subtraction_id, file_type, filename
)
except exc.IntegrityError:
raise HTTPConflict(text="File name already exists")
upload_id = subtraction_file["id"]
path = req.app["config"].data_path / "subtractions" / subtraction_id / filename
try:
size = await naive_writer(req, path)
except asyncio.CancelledError:
logger.debug(f"Subtraction file upload aborted: {upload_id}")
await delete_subtraction_file(pg, upload_id)
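        # 499 is the non-standard "client closed request" status: the upload was
        # cancelled before it finished, so the partial file record is rolled back.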
return aiohttp.web.Response(status=499)
subtraction_file = await virtool.uploads.db.finalize(
pg, size, upload_id, SubtractionFile
)
headers = {"Location": f"/subtractions/{subtraction_id}/files/{filename}"}
return json_response(subtraction_file, headers=headers, status=201)
@routes.patch("/subtractions/{subtraction_id}", permission="modify_subtraction")
@schema(
{
"name": {
"type": "string",
"coerce": virtool.validators.strip,
"empty": False,
},
"nickname": {"type": "string", "coerce": virtool.validators.strip},
}
)
async def edit(req):
"""
    Updates the name and/or nickname of an existing subtraction.
"""
db = req.app["db"]
data = req["data"]
subtraction_id = req.match_info["subtraction_id"]
update = dict()
try:
update["name"] = data["name"]
except KeyError:
pass
try:
update["nickname"] = data["nickname"]
except KeyError:
pass
document = await db.subtraction.find_one_and_update(
{"_id": subtraction_id}, {"$set": update}
)
if document is None:
raise NotFound()
document = await attach_computed(req.app, document)
return json_response(
await apply_transforms(
base_processor(document), [AttachUserTransform(db, ignore_errors=True)]
)
)
@routes.delete("/subtractions/{subtraction_id}", permission="modify_subtraction")
async def remove(req):
subtraction_id = req.match_info["subtraction_id"]
updated_count = await asyncio.shield(
virtool.subtractions.db.delete(req.app, subtraction_id)
)
if updated_count == 0:
raise NotFound()
raise HTTPNoContent
@routes.jobs_api.patch("/subtractions/{subtraction_id}")
@schema(
{
"gc": {"type": "dict", "required": True},
"count": {"type": "integer", "required": True},
}
)
async def finalize_subtraction(req: aiohttp.web.Request):
"""
    Sets the gc and count fields for a subtraction and marks it as ready.
"""
db = req.app["db"]
pg = req.app["pg"]
data = await req.json()
subtraction_id = req.match_info["subtraction_id"]
document = await db.subtraction.find_one(subtraction_id)
if document is None:
raise NotFound()
if "ready" in document and document["ready"]:
raise HTTPConflict(text="Subtraction has already been finalized")
document = await virtool.subtractions.db.finalize(
db, pg, subtraction_id, data["gc"], data["count"]
)
document = await attach_computed(req.app, document)
return json_response(
await apply_transforms(base_processor(document), [AttachUserTransform(db)])
)
@routes.jobs_api.delete("/subtractions/{subtraction_id}")
async def job_remove(req: aiohttp.web.Request):
"""
Remove a subtraction document. Only usable in the Jobs API and when subtractions are
unfinalized.
"""
db = req.app["db"]
subtraction_id = req.match_info["subtraction_id"]
document = await db.subtraction.find_one(subtraction_id)
if document is None:
raise NotFound()
if "ready" in document and document["ready"]:
raise HTTPConflict(text="Only unfinalized subtractions can be deleted")
await virtool.subtractions.db.delete(req.app, subtraction_id)
raise HTTPNoContent
@routes.get("/subtractions/{subtraction_id}/files/{filename}")
@routes.jobs_api.get("/subtractions/{subtraction_id}/files/{filename}")
async def download_subtraction_files(req: aiohttp.web.Request):
"""
Download a Bowtie2 index file or a FASTA file for the given subtraction.
"""
db = req.app["db"]
pg = req.app["pg"]
subtraction_id = req.match_info["subtraction_id"]
filename = req.match_info["filename"]
document = await db.subtraction.find_one(subtraction_id)
if document is None:
raise NotFound()
if filename not in FILES:
raise HTTPBadRequest(text="Unsupported subtraction file name")
async with AsyncSession(pg) as session:
result = (
await session.execute(
select(SubtractionFile).filter_by(
subtraction=subtraction_id, name=filename
)
)
).scalar()
if not result:
raise NotFound()
file = result.to_dict()
file_path = (
virtool.subtractions.utils.join_subtraction_path(
req.app["config"], subtraction_id
)
/ filename
)
if not os.path.isfile(file_path):
raise NotFound()
return FileResponse(
file_path,
headers={
"Content-Length": file["size"],
"Content-Type": "application/octet-stream",
},
)
|
|
"""The tests for the Tasmota binary sensor platform."""
import copy
from datetime import timedelta
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_stat_status,
get_topic_tele_sensor,
get_topic_tele_will,
)
from homeassistant.components import binary_sensor
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import (
ATTR_ASSUMED_STATE,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message, async_fire_time_changed
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
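    # "swc" carries the per-switch Tasmota switch configuration (SwitchMode);
    # 1 ("follow") exposes switch 1 as a plain binary sensor in the discovery
    # payload, which the assertions below rely on.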
config["swc"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"OFF"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test force update flag
entity = hass.data["entity_components"]["binary_sensor"].get_entity(
"binary_sensor.tasmota_binary_sensor_1"
)
assert entity.force_update
async def test_controlling_state_via_mqtt_switchname(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Custom Name"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"ON"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"OFF"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
async def test_pushon_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 13
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update is ignored
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update is ignored
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
async def test_friendly_names(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swc"][1] = 1
config["swn"][1] = "Beer"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Tasmota binary_sensor 1"
state = hass.states.get("binary_sensor.beer")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Beer"
async def test_off_delay(hass, mqtt_mock, setup_tasmota):
"""Test off_delay option."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 13 # PUSHON: 1s off_delay
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event.data["new_state"].state)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
assert events == ["off"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on", "on"]
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert events == ["off", "on", "on", "off"]
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability(hass, mqtt_mock, binary_sensor.DOMAIN, config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
poll_topic = "tasmota_49A3BC/cmnd/STATUS"
await help_test_availability_poll_state(
hass,
mqtt_client_mock,
mqtt_mock,
binary_sensor.DOMAIN,
config,
poll_topic,
"10",
)
async def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered binary_sensor."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = 1
config2["swc"][0] = 0
config1["swn"][0] = "Test"
config2["swn"][0] = "Test"
await help_test_discovery_removal(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_binary_sensor(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test update of discovered binary_sensor."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
with patch(
"homeassistant.components.tasmota.binary_sensor.TasmotaBinarySensor.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config, discovery_update
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
unique_id = f"{DEFAULT_CONFIG['mac']}_binary_sensor_switch_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, binary_sensor.DOMAIN, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
topics = [
get_topic_stat_result(config),
get_topic_tele_sensor(config),
get_topic_stat_status(config, 10),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, binary_sensor.DOMAIN, config, topics
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)
|
|
"""The tests for the Template automation."""
from datetime import timedelta
from unittest import mock
import pytest
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
import homeassistant.components.automation as automation
from tests.common import (
    assert_setup_component, async_fire_time_changed, async_mock_service,
    mock_component)
from tests.components.automation import common
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, 'test', 'automation')
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, 'group')
hass.states.async_set('test.entity', 'hello')
async def test_if_fires_on_change_bool(hass, calls):
"""Test for firing on boolean change."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ true }}',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 1 == len(calls)
await common.async_turn_off(hass)
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'planet')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_str(hass, calls):
"""Test for firing on change."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ "true" }}',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_str_crazy(hass, calls):
"""Test for firing on change."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ "TrUE" }}',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_not_fires_on_change_bool(hass, calls):
"""Test for not firing on boolean change."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ false }}',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async def test_if_not_fires_on_change_str(hass, calls):
"""Test for not firing on string change."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': 'true',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async def test_if_not_fires_on_change_str_crazy(hass, calls):
"""Test for not firing on string change."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ "Anything other than true is false." }}',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async def test_if_fires_on_no_change(hass, calls):
"""Test for firing on no change."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ true }}',
},
'action': {
'service': 'test.automation'
}
}
})
await hass.async_block_till_done()
cur_len = len(calls)
hass.states.async_set('test.entity', 'hello')
await hass.async_block_till_done()
assert cur_len == len(calls)
async def test_if_fires_on_two_change(hass, calls):
"""Test for firing on two changes."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ true }}',
},
'action': {
'service': 'test.automation'
}
}
})
# Trigger once
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 1 == len(calls)
# Trigger again
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_with_template(hass, calls):
"""Test for firing on change with template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "world") }}',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_not_fires_on_change_with_template(hass, calls):
"""Test for not firing on change with template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "hello") }}',
},
'action': {
'service': 'test.automation'
}
}
})
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_change_with_template_advanced(hass, calls):
"""Test for firing on change with template advanced."""
context = Context()
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "world") }}'
},
'action': {
'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'from_state.state',
'to_state.state', 'for'))
},
}
}
})
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'world', context=context)
await hass.async_block_till_done()
assert 1 == len(calls)
assert calls[0].context.parent_id == context.id
assert 'template - test.entity - hello - world - None' == \
calls[0].data['some']
async def test_if_fires_on_no_change_with_template_advanced(hass, calls):
"""Test for firing on no change with template advanced."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '''{%- if is_state("test.entity", "world") -%}
true
{%- else -%}
false
{%- endif -%}''',
},
'action': {
'service': 'test.automation'
}
}
})
# Different state
hass.states.async_set('test.entity', 'worldz')
await hass.async_block_till_done()
assert 0 == len(calls)
# Different state
hass.states.async_set('test.entity', 'hello')
await hass.async_block_till_done()
assert 0 == len(calls)
async def test_if_fires_on_change_with_template_2(hass, calls):
"""Test for firing on change with template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template':
'{{ not is_state("test.entity", "world") }}',
},
'action': {
'service': 'test.automation'
}
}
})
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set('test.entity', 'home')
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set('test.entity', 'work')
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set('test.entity', 'not_home')
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set('test.entity', 'home')
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action(hass, calls):
"""Test for firing if action."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': [{
'condition': 'template',
'value_template': '{{ is_state("test.entity", "world") }}'
}],
'action': {
'service': 'test.automation'
}
}
})
# Condition is not true yet
hass.bus.async_fire('test_event')
await hass.async_block_till_done()
assert 0 == len(calls)
# Change condition to true, but it shouldn't be triggered yet
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
# Condition is true and event is triggered
hass.bus.async_fire('test_event')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_with_bad_template(hass, calls):
"""Test for firing on change with bad template."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ ',
},
'action': {
'service': 'test.automation'
}
}
})
async def test_if_fires_on_change_with_bad_template_2(hass, calls):
"""Test for firing on change with bad template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ xyz | round(0) }}',
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async def test_wait_template_with_trigger(hass, calls):
"""Test using wait template with 'trigger.entity_id'."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template':
"{{ states.test.entity.state == 'world' }}",
},
'action': [
{'wait_template':
"{{ is_state(trigger.entity_id, 'hello') }}"},
{'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'from_state.state',
'to_state.state', 'for'))
}}
],
}
})
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'hello')
await hass.async_block_till_done()
assert 1 == len(calls)
assert 'template - test.entity - hello - world - None' == \
calls[0].data['some']
async def test_if_fires_on_change_with_for(hass, calls):
"""Test for firing on change with for."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_with_for_advanced(hass, calls):
"""Test for firing on change with for advanced."""
context = Context()
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "world") }}',
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'from_state.state',
'to_state.state', 'for'))
},
}
}
})
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'world', context=context)
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert 1 == len(calls)
assert calls[0].context.parent_id == context.id
assert 'template - test.entity - hello - world - 0:00:05' == \
calls[0].data['some']
async def test_if_fires_on_change_with_for_0(hass, calls):
"""Test for firing on change with for: 0."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': {
'seconds': 0
},
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_with_for_0_advanced(hass, calls):
"""Test for firing on change with for: 0 advanced."""
context = Context()
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "world") }}',
'for': {
'seconds': 0
},
},
'action': {
'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'from_state.state',
'to_state.state', 'for'))
},
}
}
})
await hass.async_block_till_done()
hass.states.async_set('test.entity', 'world', context=context)
await hass.async_block_till_done()
assert 1 == len(calls)
assert calls[0].context.parent_id == context.id
assert 'template - test.entity - hello - world - 0:00:00' == \
calls[0].data['some']
async def test_if_fires_on_change_with_for_2(hass, calls):
"""Test for firing on change with for."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': 5,
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_not_fires_on_change_with_for(hass, calls):
"""Test for firing on change with for."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=4))
await hass.async_block_till_done()
assert 0 == len(calls)
hass.states.async_set('test.entity', 'hello')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=6))
await hass.async_block_till_done()
assert 0 == len(calls)
async def test_if_not_fires_when_turned_off_with_for(hass, calls):
"""Test for firing on change with for."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=4))
await hass.async_block_till_done()
assert 0 == len(calls)
await common.async_turn_off(hass)
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=6))
await hass.async_block_till_done()
assert 0 == len(calls)
async def test_if_fires_on_change_with_for_template_1(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': {
'seconds': "{{ 5 }}"
},
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_with_for_template_2(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': "{{ 5 }}",
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_change_with_for_template_3(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': "00:00:{{ 5 }}",
},
'action': {
'service': 'test.automation'
}
}
})
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert 0 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_invalid_for_template_1(hass, calls):
"""Test for invalid for template."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': "{{ is_state('test.entity', 'world') }}",
'for': {
'seconds': "{{ five }}"
},
},
'action': {
'service': 'test.automation'
}
}
})
with mock.patch.object(automation.template, '_LOGGER') as mock_logger:
hass.states.async_set('test.entity', 'world')
await hass.async_block_till_done()
assert mock_logger.error.called
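# Illustrative sketch only (not used by any test above; the constant name is
# hypothetical): the 'for:' option of a template trigger accepts a mapping of
# time units, a bare number of seconds, or a template string, as exercised by
# the tests above.
EXAMPLE_TEMPLATE_TRIGGER_CONFIGS = [
    {'platform': 'template',
     'value_template': "{{ is_state('test.entity', 'world') }}",
     'for': {'seconds': 5}},
    {'platform': 'template',
     'value_template': "{{ is_state('test.entity', 'world') }}",
     'for': 5},
    {'platform': 'template',
     'value_template': "{{ is_state('test.entity', 'world') }}",
     'for': "00:00:{{ 5 }}"},
]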
|
|
# -*- coding: utf-8 -*-
# Copyright 2016 upvm Contributors (see CONTRIBUTORS.md file in source)
# License: Apache License 2.0 (see LICENSE file in source)
# Modules from standard library
from __future__ import print_function
import subprocess
import os
import tempfile
from sys import exit
import pwd, grp
import json
from stat import S_ISBLK
# Custom modules
from . import cfg
from . import string_ops as c
# Set username
myUser = pwd.getpwuid(os.getuid()).pw_name
def ret(returnCode):
    """Return True if *returnCode* is 0."""
    return returnCode == 0
def call(cmd, showStdout=False, showStderr=False, shell=False):
"""Execute *cmd* and return True on success."""
c.debug("Executing: {}".format(" ".join(cmd)))
null = open(os.devnull, 'w')
out = err = None
if not showStdout:
out = null
if not showStderr:
err = null
rc = subprocess.call(cmd, shell=shell, stdout=out, stderr=err)
null.close()
return ret(rc)
def convert_size_string_to_bytes(size):
    """Convert a size string like '256M' or '10G' to a float number of bytes."""
    unit = size[-1:]
sz = float(size[:-1])
if unit == 'b':
return sz
elif unit == 'K':
return sz * 1024**1
elif unit == 'M':
return sz * 1024**2
elif unit == 'G':
return sz * 1024**3
elif unit == 'T':
return sz * 1024**4
elif unit == 'P':
return sz * 1024**5
elif unit == 'E':
return sz * 1024**6
else:
print(c.RED("Invalid size specified (use e.g., '256M', '10G')"))
exit(1)
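# Illustrative self-check (hypothetical helper, never called by this module):
# a few conversions the helper above is expected to produce.
def _size_conversion_examples():
    assert convert_size_string_to_bytes('512b') == 512.0
    assert convert_size_string_to_bytes('256M') == 256 * 1024**2
    assert convert_size_string_to_bytes('10G') == 10 * 1024**3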
def print_template_metadata(template):
    """Pretty-print the metadata dict of a virt-builder template."""
    print("{0:17}: {1}".format("template", template['os-version']))
for key in template:
        if key not in ('notes', 'os-version'):
print("{0:17}: {1}".format(key, template[key]))
if template.get('notes'):
if template['notes'].get('C'):
print("notes:\n{}".format(template['notes']['C']))
else:
print("notes:\n{}".format(template['notes']))
def check_virtbuilder_version():
if not call(['virt-builder', '--version']):
print(c.RED("Error executing virt-builder; try installing libguestfs-tools rpm\n"))
exit(1)
def if_no_template_requested_then_print_vblist_and_quit():
if cfg.opts.list:
# If cmdline -l/--list: print virt-builder --list & quit
print(cfg.get_virt_builder_list())
exit()
if not cfg.opts.templateName:
# No template specified: print error & give choice to show virt-builder --list & quit
print(c.YELLOW("No template specified"))
cfg.prompt_for_template_and_exit()
def check_if_requested_template_exists():
    if cfg.opts.templateName not in cfg.templateChoices:
print(c.YELLOW("Invalid template specified"))
cfg.prompt_for_template_and_exit()
def isolate_metadata_for_chosen_vb_template():
"""Save json for chosen virt-builder template."""
for template in cfg.templateList:
if template['os-version'] == cfg.opts.templateName and template['arch'] == cfg.opts.arch:
cfg.templateInfo = template
def if_metadata_requested_then_print_and_quit():
if cfg.opts.showMetadataOnly:
print_template_metadata(cfg.templateInfo)
exit()
def check_virsh_version():
if not call(['virsh', '--version']):
print(c.RED("Error executing virsh; try installing libvirt-client rpm\n"))
exit(1)
def check_virtinstall_version():
if not call(['virt-install', '--version']):
print(c.RED("Error executing virt-install; try installing virt-install rpm\n"))
exit(1)
def testconnect_hypervisor():
"""Exit if unable to connect via virsh to default URI."""
# Set default libvirt hypervisor connection to local system if unset
os.environ['LIBVIRT_DEFAULT_URI'] = os.environ.get('LIBVIRT_DEFAULT_URI', 'qemu:///system')
if not call(['virsh', 'uri'], showStderr=True):
print(c.RED("Error connecting to {}".format(os.environ['LIBVIRT_DEFAULT_URI'])))
if os.environ['LIBVIRT_DEFAULT_URI'] == 'qemu:///system':
print("You need to execute the initial setup program -- as root run:")
print(c.green(" /usr/share/{}/initial-setup".format(cfg.prog)))
else:
print("The environment variable 'LIBVIRT_DEFAULT_URI' is customized")
print("If this is not intentional, remove the declaration from your shell config")
exit(1)
def check_for_missing_imgdir():
if not os.path.isdir(cfg.opts.img_dir):
print(c.RED("Image dir '{}' does not exist".format(cfg.opts.img_dir)))
if cfg.opts.img_dir == '/var/lib/{}'.format(cfg.prog):
print("You need to execute the initial setup program -- as root run:")
print(c.green(" /usr/share/{}/initial-setup".format(cfg.prog)))
exit(1)
def check_user_in_libvirt_group():
if os.getuid() and myUser not in grp.getgrnam('libvirt').gr_mem:
print(c.RED("Current user ({}) not in the 'libvirt' system group".format(myUser)))
if os.path.isdir('/var/lib/{}'.format(cfg.prog)):
print("As root, run:")
print(c.green(" usermod -aG libvirt {}".format(myUser)))
else:
print("You need to execute the initial setup program -- as root run:")
print(c.green(" /usr/share/{}/initial-setup".format(cfg.prog)))
exit(1)
def check_for_writable_imgdir():
c.debug("Testing write perms by creating tempfile in {}".format(cfg.opts.img_dir))
try:
f = tempfile.TemporaryFile(dir=cfg.opts.img_dir)
f.close()
    except Exception:
dirstat = os.stat(cfg.opts.img_dir)
user = pwd.getpwuid(dirstat.st_uid).pw_name
group = grp.getgrgid(dirstat.st_gid).gr_name
print(c.RED("Unable to create new file in image dir '{}' owned by {}:{}".format(cfg.opts.img_dir, user, group)))
if myUser in grp.getgrnam(group).gr_mem:
print("Your user ({}) *is* a member of the appropriate group ({}); however ...\n"
"Your current login session is not running with that group credential\n"
"To fix this, open a new session (ssh/su -) or log out & log back in (GUI)".format(myUser, group))
else:
print("Either fix directory permissions as root or specify alternate dir with '--img-dir' option")
exit(1)
def try_capture_existing_vm_names():
"""Capture list of existing VM names and exit on failure."""
cmd = ['virsh', 'list', '--all', '--name']
c.debug("Executing: {}".format(" ".join(cmd)))
try:
cfg.guestList = subprocess.check_output(cmd)
    except Exception:
print(c.RED("\nUnknown error executing '{}'".format(" ".join(cmd))))
exit(1)
def validate_image_size():
if cfg.opts.img_size:
return convert_size_string_to_bytes(cfg.opts.img_size)
else:
return float(cfg.templateInfo['size'])
def validate_blockdev(imgsize):
    """Validate that --primary-blockdev is a block device big enough to hold imgsize bytes."""
    blockdev = cfg.opts.primary_blockdev
try:
mode = os.stat(blockdev).st_mode
    except OSError as e:
print(c.RED(e))
print("It looks like you passed the wrong thing to the --primary-blockdev option")
exit(1)
if not S_ISBLK(mode):
print(c.RED("File passed as an arg to the --primary-blockdev option is not a block device"))
print("This option is meant to be used to create a VM whose primary storage is a block device like /dev/sdb2")
exit(1)
cmd = ['lsblk', '-no', 'size', blockdev]
c.debug("Executing: {}".format(" ".join(cmd)))
try:
blksize = subprocess.check_output(cmd).strip()
    except Exception:
print(c.RED("Unexpected error using lsblk to check size of --primary-blockdev device"))
print("This shouldn't happen ... unless you somehow don't have util-linux and the lsblk command")
exit(1)
blksize = convert_size_string_to_bytes(blksize)
if imgsize > blksize:
print(c.YELLOW("Aborting because block device '{}' is smaller than projected image size".format(blockdev)))
print("Block device size: {} MiB".format(int(blksize/1024/1024)))
print("Image file size: {} MiB".format(int(imgsize/1024/1024)))
exit(1)
def check_sudo_blockdevHelper_rights():
blockdev, imgdir = cfg.opts.primary_blockdev, cfg.opts.img_dir
cmd = ['sudo', '-l', cfg.blockdevHelper, blockdev, imgdir]
if not call(cmd):
print(c.YELLOW("Need root privileges to write image to {}".format(blockdev)))
print(c.yellow(
"You've requested {} write a VM disk image out to a block device but you\n"
"are not root. To do this, give yourself the appropriate sudo access by\n"
"running the following command as root:".format(cfg.prog)))
print(c.green(" echo '{} ALL=(ALL) NOPASSWD: {}' >>/etc/sudoers.d/{}\n".format(myUser, cfg.blockdevHelper, cfg.prog)))
print(c.yellow(
"If you want to restrict your user to only have write access to a specific\n"
"LVM volume group, you could instead do something like:"))
print(c.green(" echo '{} ALL=(ALL) NOPASSWD: {} /dev/volgroup*' >>/etc/sudoers.d/{}\n".format(myUser, cfg.blockdevHelper, cfg.prog)))
exit(1)
def check_system_config():
##check_virtbuilder_version()
if_no_template_requested_then_print_vblist_and_quit()
check_if_requested_template_exists()
isolate_metadata_for_chosen_vb_template()
if_metadata_requested_then_print_and_quit()
check_virsh_version()
check_virtinstall_version()
testconnect_hypervisor()
check_for_missing_imgdir()
##check_user_in_libvirt_group()
check_for_writable_imgdir()
try_capture_existing_vm_names()
imgsize = validate_image_size()
if cfg.opts.primary_blockdev:
validate_blockdev(imgsize)
check_sudo_blockdevHelper_rights()
|
|
__author__ = 'Tomasz J. Kotarba <tomasz@kotarba.net>'
__copyright__ = 'Copyright (c) 2014, Tomasz J. Kotarba. All rights reserved.'
from django.test import TestCase
from fm.models import Area
from fm.models import Facility
from fm.models import Contact
from fm.models import Role
class AreaModelTest(TestCase):
def test_default_field_values(self):
area = Area.objects.create()
self.assertEqual(area.area_name, '')
def test_parent_set_correctly_on_adding_a_child(self):
area1 = Area.objects.create()
area2 = Area.objects.create()
area2.area_children.add(area1)
area1 = Area.objects.get(id=area1.id)
area2 = Area.objects.get(id=area2.id)
self.assertEqual(area1.area_parent.id, area2.id)
self.assertIn(area1, area2.area_children.all())
def test_child_added_correctly_on_setting_a_parent(self):
area1 = Area.objects.create()
area2 = Area.objects.create()
area1.area_parent = area2
area1.save()
area2 = Area.objects.get(id=area2.id)
area1 = Area.objects.get(id=area1.id)
self.assertEqual(area1.area_parent.id, area2.id)
self.assertIn(area1, area2.area_children.all())
def test_by_default_area_string_contains_its_name_zone_type_and_path(self):
area0 = Area.objects.create(area_name='Area 0', area_type='State')
area1 = Area.objects.create(area_name='Area 1', area_type='State Zone',
area_parent=area0)
area2 = Area.objects.create(area_name='Area 2', area_type='LGA',
area_parent=area1)
self.assertEqual(u'Area 2 (LGA in Area 1 in Area 0)',
area2.__unicode__())
self.assertEqual('Area 2 (LGA in Area 1 in Area 0)', area2.__str__())
def test_child_preserved_and_its_parent_set_to_null_on_parent_delete(self):
area1 = Area.objects.create(area_name='1')
area2 = Area.objects.create(area_name='2')
area1.area_children.add(area2)
area1 = Area.objects.get(id=area1.id)
area2 = Area.objects.get(id=area2.id)
area1.delete()
area2 = Area.objects.get(id=area2.id)
self.assertIsNone(area2.area_parent)
self.assertNotIn(area1, Area.objects.all())
self.assertIn(area2, Area.objects.all())
def test_parent_preserved_and_the_deleted_child_not_in_children(self):
area1 = Area.objects.create(area_name='1')
area2 = Area.objects.create(area_name='2')
area1.area_children.add(area2)
area1 = Area.objects.get(id=area1.id)
area2 = Area.objects.get(id=area2.id)
area2.delete()
area1 = Area.objects.get(id=area1.id)
self.assertNotIn(area2, Area.objects.all())
self.assertIn(area1, Area.objects.all())
self.assertEqual(0, area1.area_children.count())
class FacilityModelTest(TestCase):
def test_default_field_values(self):
facility = Facility.objects.create()
self.assertEqual(facility.facility_name, '')
self.assertEqual(facility.facility_status, '')
self.assertEqual(facility.facility_area, None)
self.assertEqual(facility.json, None)
def test_area_set_correctly_on_adding_a_facility_to_an_area(self):
facility = Facility.objects.create()
area = Area.objects.create()
area.area_facilities.add(facility)
facility = Facility.objects.get(id=facility.id)
area = Area.objects.get(id=area.id)
self.assertEqual(facility.facility_area.id, area.id)
self.assertIn(facility, area.area_facilities.all())
def test_child_added_correctly_on_setting_a_parent(self):
facility = Facility.objects.create()
area = Area.objects.create()
facility.facility_area = area
facility.save()
area = Area.objects.get(id=area.id)
facility = Facility.objects.get(id=facility.id)
self.assertEqual(facility.facility_area.id, area.id)
self.assertIn(facility, area.area_facilities.all())
def test_by_default_facility_string_contains_its_name_status_and_area(self):
area = Area.objects.create(
area_name='Area 51', area_type='State Zone')
facility = Facility.objects.create(facility_name='Facility 0',
facility_status='some status',
facility_area=area)
self.assertEqual(u'Facility 0 [some status] in Area 51 (State Zone)',
facility.__unicode__())
self.assertEqual('Facility 0 [some status] in Area 51 (State Zone)',
facility.__str__())
def test_facility_preserved_and_its_area_set_to_null_on_area_delete(self):
area = Area.objects.create()
facility = Facility.objects.create()
area.area_facilities.add(facility)
area = Area.objects.get(id=area.id)
facility = Facility.objects.get(id=facility.id)
area.delete()
facility = Facility.objects.get(id=facility.id)
self.assertIsNone(facility.facility_area)
self.assertNotIn(area, Area.objects.all())
self.assertIn(facility, Facility.objects.all())
def test_area_preserved_and_the_deleted_facility_not_in_facilities(self):
area = Area.objects.create()
facility = Facility.objects.create()
area.area_facilities.add(facility)
        area = Area.objects.get(id=area.id)
facility = Facility.objects.get(id=facility.id)
facility.delete()
area = Area.objects.get(id=area.id)
self.assertNotIn(facility, Facility.objects.all())
self.assertIn(area, Area.objects.all())
self.assertEqual(0, area.area_facilities.count())
class ContactModelTest(TestCase):
def test_default_field_values(self):
contact = Contact.objects.create()
self.assertEqual(contact.contact_name, '')
self.assertEqual(contact.contact_phone, '')
self.assertEqual(contact.contact_email, '')
self.assertEqual(contact.json, None)
def test_by_default_contact_string_contains_its_name(self):
contact = Contact.objects.create(contact_name='Contact 0',
contact_phone='055555',
contact_email='a@b.cc')
self.assertEqual(u'Contact 0 <a@b.cc>', contact.__unicode__())
self.assertEqual('Contact 0 <a@b.cc>', contact.__str__())
class RoleModelTest(TestCase):
def test_default_field_values(self):
role = Role.objects.create()
self.assertEqual(role.role_name, '')
self.assertEqual(role.role_contact, None)
self.assertEqual(role.role_facility, None)
def test_role_and_contact_association_bidirectional_from_contact(self):
role = Role.objects.create()
contact = Contact.objects.create()
contact.contact_roles.add(role)
role = Role.objects.get(id=role.id)
contact = Contact.objects.get(id=contact.id)
self.assertEqual(role.role_contact.id, contact.id)
self.assertIn(role, contact.contact_roles.all())
def test_role_and_contact_association_bidirectional_from_role(self):
role = Role.objects.create()
contact = Contact.objects.create()
role.role_contact = contact
role.save()
contact = Contact.objects.get(id=contact.id)
role = Role.objects.get(id=role.id)
self.assertEqual(role.role_contact.id, contact.id)
self.assertIn(role, contact.contact_roles.all())
def test_by_default_role_string_contains_its_name_and_facility_name(self):
contact = Contact.objects.create(contact_name='contact 1')
facility = Facility.objects.create(
facility_name='Hyperion', facility_type='Zonal Store',
facility_status='status1')
role = Role.objects.create(role_name='SCCO',
role_contact=contact,
role_facility=facility)
self.assertEqual(u'SCCO @ Hyperion [status1]',
role.__unicode__())
self.assertEqual('SCCO @ Hyperion [status1]',
role.__str__())
def test_role_deleted_on_contact_delete(self):
contact = Contact.objects.create()
role = Role.objects.create()
contact.contact_roles.add(role)
contact = Contact.objects.get(id=contact.id)
role = Role.objects.get(id=role.id)
self.assertIn(role, contact.contact_roles.all())
contact.delete()
self.assertNotIn(contact, Contact.objects.all())
self.assertNotIn(role, Role.objects.all())
def test_role_deleted_on_facility_delete(self):
facility = Facility.objects.create()
role = Role.objects.create()
facility.facility_roles.add(role)
facility = Facility.objects.get(id=facility.id)
role = Role.objects.get(id=role.id)
self.assertIn(role, facility.facility_roles.all())
facility.delete()
self.assertNotIn(facility, Facility.objects.all())
self.assertNotIn(role, Role.objects.all())
def test_contacts_and_facilities_kept_clean_upon_role_deletion(self):
contact = Contact.objects.create()
facility = Facility.objects.create()
role = Role.objects.create()
contact.contact_roles.add(role)
facility.facility_roles.add(role)
role = Role.objects.get(id=role.id)
self.assertIn(role, contact.contact_roles.all())
self.assertIn(role, facility.facility_roles.all())
role.delete()
self.assertNotIn(role, Role.objects.all())
self.assertIn(contact, Contact.objects.all())
self.assertIn(facility, Facility.objects.all())
contact = Contact.objects.get(id=contact.id)
facility = Facility.objects.get(id=facility.id)
self.assertNotIn(role, contact.contact_roles.all())
self.assertNotIn(role, facility.facility_roles.all())
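# Illustrative helper (hypothetical, never called by the tests above): the
# tests re-fetch objects after changing related sets because relation updates
# applied in the database are not reflected on instances already loaded in
# memory; a re-fetch like this (or Model.refresh_from_db()) picks them up.
def _reload(instance):
    return type(instance).objects.get(id=instance.id)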
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
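# Illustrative sketch (hypothetical helper, not called by this script): the
# core of Miner.work() above is Bitcoin's double SHA-256 of the 80-byte block
# header; the digest, interpreted as a little-endian 256-bit integer, must be
# below the decoded target for the nonce to count as proof-of-work.
def double_sha256(header_bin):
    return hashlib.sha256(hashlib.sha256(header_bin).digest()).digest()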
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8137
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import hashlib
import logging
import os
import re
import select
import threading
import time
from contextlib import closing
from six import string_types
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.pantsd.process_manager import FingerprintedProcessManager, ProcessGroup
from pants.util.dirutil import read_file, safe_file_dump, safe_open
logger = logging.getLogger(__name__)
class NailgunProcessGroup(ProcessGroup):
_NAILGUN_KILL_LOCK = threading.Lock()
def __init__(self, metadata_base_dir=None):
super(NailgunProcessGroup, self).__init__(name='nailgun', metadata_base_dir=metadata_base_dir)
# TODO: this should enumerate the .pids dir first, then fallback to ps enumeration (& warn).
def _iter_nailgun_instances(self, everywhere=False):
def predicate(proc):
if proc.name() == NailgunExecutor._PROCESS_NAME:
if not everywhere:
return NailgunExecutor._PANTS_NG_BUILDROOT_ARG in proc.cmdline()
else:
return any(arg.startswith(NailgunExecutor._PANTS_NG_ARG_PREFIX) for arg in proc.cmdline())
return self.iter_instances(predicate)
def killall(self, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
"""
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid))
proc.terminate()
# TODO: Once we integrate standard logging into our reporting framework, we can consider making
# some of the log.debug() below into log.info(). Right now it just looks wrong on the console.
class NailgunExecutor(Executor, FingerprintedProcessManager):
"""Executes java programs by launching them in nailgun server.
If a nailgun is not available for a given set of jvm args and classpath, one is launched and
re-used for the given jvm args and classpath on subsequent runs.
"""
# 'NGServer 0.9.1 started on 127.0.0.1, port 53785.'
_NG_PORT_REGEX = re.compile(r'.*\s+port\s+(\d+)\.$')
# Used to identify if we own a given nailgun server.
FINGERPRINT_CMD_KEY = b'-Dpants.nailgun.fingerprint'
_PANTS_NG_ARG_PREFIX = b'-Dpants.buildroot'
_PANTS_OWNER_ARG_PREFIX = b'-Dpants.nailgun.owner'
_PANTS_NG_BUILDROOT_ARG = '='.join((_PANTS_NG_ARG_PREFIX, get_buildroot()))
_NAILGUN_SPAWN_LOCK = threading.Lock()
_SELECT_WAIT = 1
_PROCESS_NAME = b'java'
def __init__(self, identity, workdir, nailgun_classpath, distribution,
connect_timeout=10, connect_attempts=5, metadata_base_dir=None):
Executor.__init__(self, distribution=distribution)
FingerprintedProcessManager.__init__(self,
name=identity,
process_name=self._PROCESS_NAME,
metadata_base_dir=metadata_base_dir)
if not isinstance(workdir, string_types):
raise ValueError('Workdir must be a path string, not: {workdir}'.format(workdir=workdir))
self._identity = identity
self._workdir = workdir
self._ng_stdout = os.path.join(workdir, 'stdout')
self._ng_stderr = os.path.join(workdir, 'stderr')
self._nailgun_classpath = maybe_list(nailgun_classpath)
self._connect_timeout = connect_timeout
self._connect_attempts = connect_attempts
def __str__(self):
return 'NailgunExecutor({identity}, dist={dist}, pid={pid} socket={socket})'.format(
identity=self._identity, dist=self._distribution, pid=self.pid, socket=self.socket)
def _create_owner_arg(self, workdir):
# Currently the owner is identified via the full path to the workdir.
return '='.join((self._PANTS_OWNER_ARG_PREFIX, workdir))
def _create_fingerprint_arg(self, fingerprint):
return '='.join((self.FINGERPRINT_CMD_KEY, fingerprint))
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
# TODO(John Sirois): hash classpath contents?
    for item in (''.join(sorted(jvm_options)), ''.join(sorted(classpath)), repr(java_version)):
      digest.update(item)
return digest.hexdigest()
def _runner(self, classpath, main, jvm_options, args, cwd=None):
"""Runner factory. Called via Executor.execute()."""
command = self._create_command(classpath, main, jvm_options, args)
class Runner(self.Runner):
@property
def executor(this):
return self
@property
def command(self):
return list(command)
def run(this, stdout=None, stderr=None, stdin=None, cwd=None):
nailgun = self._get_nailgun_client(jvm_options, classpath, stdout, stderr, stdin)
try:
logger.debug('Executing via {ng_desc}: {cmd}'.format(ng_desc=nailgun, cmd=this.cmd))
return nailgun.execute(main, cwd, *args)
except nailgun.NailgunError as e:
self.terminate()
raise self.Error('Problem launching via {ng_desc} command {main} {args}: {msg}'
.format(ng_desc=nailgun, main=main, args=' '.join(args), msg=e))
return Runner()
def _check_nailgun_state(self, new_fingerprint):
running = self.is_alive()
updated = self.needs_restart(new_fingerprint) or self.cmd != self._distribution.java
    logger.debug('Nailgun {nailgun} state: updated={up!s} running={run!s} fingerprint={old_fp} '
                 'new_fingerprint={new_fp} distribution={old_dist} new_distribution={new_dist}'
                 .format(nailgun=self._identity, up=updated, run=running,
                         old_fp=self.fingerprint, new_fp=new_fingerprint,
                         old_dist=self.cmd, new_dist=self._distribution.java))
return running, updated
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client."""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug('Found running nailgun server that needs updating, killing {server}'
.format(server=self._identity))
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin)
return self._create_ngclient(self.socket, stdout, stderr, stdin)
def _await_socket(self, timeout):
"""Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun stdout."""
with safe_open(self._ng_stdout, 'r') as ng_stdout:
start_time = time.time()
accumulated_stdout = ''
while 1:
readable, _, _ = select.select([ng_stdout], [], [], self._SELECT_WAIT)
if readable:
line = ng_stdout.readline() # TODO: address deadlock risk here.
try:
return self._NG_PORT_REGEX.match(line).group(1)
except AttributeError:
pass
accumulated_stdout += line
if (time.time() - start_time) > timeout:
stderr = read_file(self._ng_stderr)
raise NailgunClient.NailgunError(
'Failed to read nailgun output after {sec} seconds!\n'
'Stdout:\n{stdout}\nStderr:\n{stderr}'.format(
sec=timeout,
stdout=accumulated_stdout,
stderr=stderr,
)
)
def _create_ngclient(self, port, stdout, stderr, stdin):
return NailgunClient(port=port, ins=stdin, out=stdout, err=stderr, workdir=get_buildroot())
def ensure_connectable(self, nailgun):
"""Ensures that a nailgun client is connectable or raises NailgunError."""
attempt_count = 1
while 1:
try:
with closing(nailgun.try_connect()) as sock:
logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername()))
return
except nailgun.NailgunConnectionError:
if attempt_count >= self._connect_attempts:
logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts))
raise # Re-raise the NailgunConnectionError which provides more context to the user.
attempt_count += 1
time.sleep(self.WAIT_INTERVAL_SEC)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin):
"""Synchronously spawn a new nailgun server."""
    # Truncate the nailgun's stdout & stderr.
safe_file_dump(self._ng_stdout, '')
safe_file_dump(self._ng_stderr, '')
jvm_options = jvm_options + [self._PANTS_NG_BUILDROOT_ARG,
self._create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint)]
post_fork_child_opts = dict(fingerprint=fingerprint,
jvm_options=jvm_options,
classpath=classpath,
stdout=stdout,
stderr=stderr)
logger.debug('Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}'
.format(i=self._identity, f=fingerprint, j=jvm_options, cp=classpath))
self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
# Wait for and write the port information in the parent so we can bail on exception/timeout.
self.await_pid(self._connect_timeout)
self.write_socket(self._await_socket(self._connect_timeout))
logger.debug('Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}'
.format(i=self._identity, f=fingerprint, pid=self.pid, port=self.socket))
client = self._create_ngclient(self.socket, stdout, stderr, stdin)
self.ensure_connectable(client)
return client
def _check_process_buildroot(self, process):
"""Matches only processes started from the current buildroot."""
return self._PANTS_NG_BUILDROOT_ARG in process.cmdline()
def is_alive(self):
"""A ProcessManager.is_alive() override that ensures buildroot flags are present in the process
command line arguments."""
return super(NailgunExecutor, self).is_alive(self._check_process_buildroot)
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(classpath=classpath,
main='com.martiansoftware.nailgun.NGServer',
jvm_options=jvm_options,
args=[':0'],
stdin=safe_open('/dev/null', 'r'),
stdout=safe_open(self._ng_stdout, 'w'),
stderr=safe_open(self._ng_stderr, 'w'),
close_fds=True)
self.write_pid(subproc.pid)
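# Illustrative sketch (hypothetical helper, not used by pants itself): the
# fingerprint computed by NailgunExecutor._fingerprint above is a SHA1 over the
# sorted jvm options, the sorted classpath and the JVM version, so invocations
# that differ only in argument order reuse the same nailgun server.
def _example_fingerprint_stability():
  a = NailgunExecutor._fingerprint(['-Xmx1g', '-Dfoo=1'], ['b.jar', 'a.jar'], '1.8.0')
  b = NailgunExecutor._fingerprint(['-Dfoo=1', '-Xmx1g'], ['a.jar', 'b.jar'], '1.8.0')
  return a == b  # expected: True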
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils.fixture import uuidsentinel as uuids
import nova.conf
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.tests.unit import utils as test_utils
from nova import utils
CONF = nova.conf.CONF
class AggregatesTest(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2'
ADMIN_API = True
def _add_hosts_to_aggregate(self):
"""List all compute services and add them all to an aggregate."""
compute_services = [s for s in self.api.get_services()
if s['binary'] == 'nova-compute']
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
for service in compute_services:
self.api.add_host_to_aggregate(agg['id'], service['host'])
self._test_aggregate = agg
return len(compute_services)
def test_add_hosts(self):
# Default case with one compute, mapped for us
self.assertEqual(1, self._add_hosts_to_aggregate())
def test_add_unmapped_host(self):
"""Ensure that hosts without mappings are still found and added"""
# Add another compute, but nuke its HostMapping
self.start_service('compute', host='compute2')
self.host_mappings['compute2'].destroy()
self.assertEqual(2, self._add_hosts_to_aggregate())
class AggregatesV281Test(AggregatesTest):
api_major_version = 'v2.1'
microversion = '2.81'
def setUp(self):
self.flags(compute_driver='fake.FakeDriverWithCaching')
super(AggregatesV281Test, self).setUp()
def test_cache_images_on_aggregate(self):
self._add_hosts_to_aggregate()
agg = self._test_aggregate
img = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
self.assertEqual(set(), self.compute.driver.cached_images)
body = {'cache': [
{'id': img},
]}
self.api.api_post('/os-aggregates/%s/images' % agg['id'], body,
check_response_status=[202])
self.assertEqual(set([img]), self.compute.driver.cached_images)
def test_cache_images_on_aggregate_missing_image(self):
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
# NOTE(danms): This image-id does not exist
img = '155d900f-4e14-4e4c-a73d-069cbf4541e0'
body = {'cache': [
{'id': img},
]}
self.api.api_post('/os-aggregates/%s/images' % agg['id'], body,
check_response_status=[400])
def test_cache_images_on_missing_aggregate(self):
img = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
body = {'cache': [
{'id': img},
]}
self.api.api_post('/os-aggregates/123/images', body,
check_response_status=[404])
def test_cache_images_with_duplicates(self):
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
img = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
body = {'cache': [
{'id': img},
{'id': img},
]}
self.api.api_post('/os-aggregates/%i/images' % agg['id'], body,
check_response_status=[400])
def test_cache_images_with_no_images(self):
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
body = {'cache': []}
self.api.api_post('/os-aggregates/%i/images' % agg['id'], body,
check_response_status=[400])
def test_cache_images_with_additional_in_image(self):
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
img = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
body = {'cache': [
{'id': img, 'power': '1.21 gigawatts'},
]}
self.api.api_post('/os-aggregates/%i/images' % agg['id'], body,
check_response_status=[400])
def test_cache_images_with_missing_image_id(self):
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
body = {'cache': [
{'power': '1.21 gigawatts'},
]}
self.api.api_post('/os-aggregates/%i/images' % agg['id'], body,
check_response_status=[400])
def test_cache_images_with_missing_cache(self):
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
body = {}
self.api.api_post('/os-aggregates/%i/images' % agg['id'], body,
check_response_status=[400])
def test_cache_images_with_additional_in_cache(self):
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
img = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
body = {'cache': [{'id': img}],
'power': '1.21 gigawatts',
}
self.api.api_post('/os-aggregates/%i/images' % agg['id'], body,
check_response_status=[400])
class AggregateRequestFiltersTest(
integrated_helpers.ProviderUsageBaseTestCase):
microversion = 'latest'
compute_driver = 'fake.MediumFakeDriver'
def setUp(self):
super(AggregateRequestFiltersTest, self).setUp()
self.aggregates = {}
self._start_compute('host1')
self._start_compute('host2')
self.flavors = self.api.get_flavors()
# Aggregate with only host1
self._create_aggregate('only-host1')
self._add_host_to_aggregate('only-host1', 'host1')
# Aggregate with only host2
self._create_aggregate('only-host2')
self._add_host_to_aggregate('only-host2', 'host2')
# Aggregate with neither host
self._create_aggregate('no-hosts')
def _start_compute(self, host):
"""Start a nova compute service on the given host
:param host: the name of the host that will be associated to the
compute service.
:return: the nova compute service object
"""
compute = self.start_service('compute', host=host)
self.computes[host] = compute
return compute
def _create_aggregate(self, name):
agg = self.admin_api.post_aggregate({'aggregate': {'name': name}})
self.aggregates[name] = agg
def _get_provider_uuid_by_host(self, host):
"""Return the compute node uuid for a named compute host."""
# NOTE(gibi): the compute node id is the same as the compute node
# provider uuid on that compute
resp = self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
return resp['hypervisors'][0]['id']
def _add_host_to_aggregate(self, agg, host):
"""Add a compute host to both nova and placement aggregates.
:param agg: Name of the nova aggregate
:param host: Name of the compute host
"""
agg = self.aggregates[agg]
self.admin_api.add_host_to_aggregate(agg['id'], host)
def _boot_server(self, az=None, flavor_id=None, image_id=None,
end_status='ACTIVE'):
flavor_id = flavor_id or self.flavors[0]['id']
image_uuid = image_id or '155d900f-4e14-4e4c-a73d-069cbf4541e6'
server_req = self._build_server(
image_uuid=image_uuid,
flavor_id=flavor_id,
networks='none', az=az)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(created_server, end_status)
return server
def _get_instance_host(self, server):
srv = self.admin_api.get_server(server['id'])
return srv['OS-EXT-SRV-ATTR:host']
def _set_az_aggregate(self, agg, az):
"""Set the availability_zone of an aggregate
:param agg: Name of the nova aggregate
:param az: Availability zone name
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'availability_zone': az,
}
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
def _set_metadata(self, agg, metadata):
"""POST /os-aggregates/{aggregate_id}/action (set_metadata)
:param agg: Name of the nova aggregate
:param metadata: dict of aggregate metadata key/value pairs to add,
update, or remove if value=None (note "availability_zone" cannot be
nulled out once set).
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': metadata
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
def _grant_tenant_aggregate(self, agg, tenants):
"""Grant a set of tenants access to use an aggregate.
:param agg: Name of the nova aggregate
:param tenants: A list of all tenant ids that will be allowed access
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'filter_tenant_id%i' % i: tenant
for i, tenant in enumerate(tenants)
}
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
def _set_traits_on_aggregate(self, agg, traits):
"""Set traits to aggregate.
:param agg: Name of the nova aggregate
:param traits: List of traits to be assigned to the aggregate
"""
action = {
'set_metadata': {
'metadata': {
'trait:' + trait: 'required'
for trait in traits
}
}
}
self.admin_api.post_aggregate_action(
self.aggregates[agg]['id'], action)
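# Illustrative only (not referenced by any test; the tenant ids are made up):
# the two helpers directly above translate into aggregate set_metadata actions
# of the following shapes.
_EXAMPLE_TENANT_GRANT_ACTION = {
    'set_metadata': {'metadata': {'filter_tenant_id0': 'tenant-a',
                                  'filter_tenant_id1': 'tenant-b'}},
}
_EXAMPLE_TRAIT_ISOLATION_ACTION = {
    'set_metadata': {'metadata': {'trait:HW_CPU_X86_VMX': 'required'}},
}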
class AggregatePostTest(AggregateRequestFiltersTest):
    def test_set_az_for_aggregate_no_instances(self):
"""Should be possible to update AZ for an empty aggregate.
Check you can change the AZ name of an aggregate when it does
not contain any servers.
"""
self._set_az_aggregate('only-host1', 'fake-az')
def test_rename_to_same_az(self):
"""AZ rename should pass successfully if AZ name is not changed"""
az = 'fake-az'
self._set_az_aggregate('only-host1', az)
self._boot_server(az=az)
self._set_az_aggregate('only-host1', az)
def test_fail_set_az(self):
"""Check it is not possible to update a non-empty aggregate.
Check you cannot change the AZ name of an aggregate when it
contains any servers.
"""
az = 'fake-az'
self._set_az_aggregate('only-host1', az)
server = self._boot_server(az=az)
self.assertRaisesRegex(
client.OpenStackApiException,
'One or more hosts contain instances in this zone.',
self._set_az_aggregate, 'only-host1', 'new' + az)
# Configure for the SOFT_DELETED scenario.
self.flags(reclaim_instance_interval=300)
self.api.delete_server(server['id'])
server = self._wait_for_state_change(server, 'SOFT_DELETED')
self.assertRaisesRegex(
client.OpenStackApiException,
'One or more hosts contain instances in this zone.',
self._set_az_aggregate, 'only-host1', 'new' + az)
# Force delete the SOFT_DELETED server.
self.api.api_post(
'/servers/%s/action' % server['id'], {'forceDelete': None})
# Wait for it to be deleted since forceDelete is asynchronous.
self._wait_until_deleted(server)
# Now we can rename the AZ since the server is gone.
self._set_az_aggregate('only-host1', 'new' + az)
def test_cannot_delete_az(self):
az = 'fake-az'
# Assign the AZ to the aggregate.
self._set_az_aggregate('only-host1', az)
# Set some metadata on the aggregate; note the "availability_zone"
# metadata key is not specified.
self._set_metadata('only-host1', {'foo': 'bar'})
# Verify the AZ was retained.
agg = self.admin_api.api_get(
'/os-aggregates/%s' %
self.aggregates['only-host1']['id']).body['aggregate']
self.assertEqual(az, agg['availability_zone'])
# NOTE: this test case has the same test methods as AggregatePostTest
# but for the AZ update it uses PUT /os-aggregates/{aggregate_id} method
class AggregatePutTest(AggregatePostTest):
def _set_az_aggregate(self, agg, az):
"""Set the availability_zone of an aggregate via PUT
:param agg: Name of the nova aggregate
:param az: Availability zone name
"""
agg = self.aggregates[agg]
body = {
'aggregate': {
'availability_zone': az,
},
}
self.admin_api.put_aggregate(agg['id'], body)
class TenantAggregateFilterTest(AggregateRequestFiltersTest):
def setUp(self):
super(TenantAggregateFilterTest, self).setUp()
# Default to enabling the filter and making it mandatory
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
group='scheduler')
def test_tenant_id_required_fails_if_no_aggregate(self):
# Without granting our tenant permission to an aggregate, instance
# creates should fail since aggregates are required
self._boot_server(end_status='ERROR')
def test_tenant_id_not_required_succeeds_if_no_aggregate(self):
self.flags(placement_aggregate_required_for_tenants=False,
group='scheduler')
# Without granting our tenant permission to an aggregate, instance
# creates should still succeed since aggregates are not required
self._boot_server(end_status='ACTIVE')
def test_filter_honors_tenant_id(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with only host1 in it
# and boot some servers. They should all stack up on host1.
self._grant_tenant_aggregate('only-host1',
['foo', tenant, 'bar'])
server1 = self._boot_server(end_status='ACTIVE')
server2 = self._boot_server(end_status='ACTIVE')
# Grant our tenant access to the aggregate with only host2 in it
# and boot some servers. They should all stack up on host2.
self._grant_tenant_aggregate('only-host1',
['foo', 'bar'])
self._grant_tenant_aggregate('only-host2',
['foo', tenant, 'bar'])
server3 = self._boot_server(end_status='ACTIVE')
server4 = self._boot_server(end_status='ACTIVE')
# Make sure the servers landed on the hosts we had access to at
# the time we booted them.
hosts = [self._get_instance_host(s)
for s in (server1, server2, server3, server4)]
expected_hosts = ['host1', 'host1', 'host2', 'host2']
self.assertEqual(expected_hosts, hosts)
def test_filter_with_empty_aggregate(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
self._boot_server(end_status='ERROR')
def test_filter_with_multiple_aggregates_for_tenant(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it,
# and one with a host.
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
self._grant_tenant_aggregate('only-host2',
['foo', tenant, 'bar'])
# Boot several servers and make sure they all land on the
# only host we have access to.
for i in range(0, 4):
server = self._boot_server(end_status='ACTIVE')
self.assertEqual('host2', self._get_instance_host(server))
class AvailabilityZoneFilterTest(AggregateRequestFiltersTest):
def setUp(self):
# Default to enabling the filter
self.flags(query_placement_for_availability_zone=True,
group='scheduler')
# Use custom weigher to make sure that we have a predictable
# scheduling sort order.
self.useFixture(nova_fixtures.HostNameWeigherFixture())
# NOTE(danms): Do this before calling setUp() so that
# the scheduler service that is started sees the new value
filters = CONF.filter_scheduler.enabled_filters
filters.remove('AvailabilityZoneFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(AvailabilityZoneFilterTest, self).setUp()
def test_filter_with_az(self):
self._set_az_aggregate('only-host2', 'myaz')
server1 = self._boot_server(az='myaz')
server2 = self._boot_server(az='myaz')
hosts = [self._get_instance_host(s) for s in (server1, server2)]
self.assertEqual(['host2', 'host2'], hosts)
class IsolateAggregateFilterTest(AggregateRequestFiltersTest):
def setUp(self):
# Default to enabling the filter
self.flags(enable_isolated_aggregate_filtering=True,
group='scheduler')
# Use a custom weigher that would prefer host1 if the isolate
# aggregate filter were not in place otherwise it's not deterministic
# whether we're landing on host2 because of the filter or just by
# chance.
self.useFixture(nova_fixtures.HostNameWeigherFixture())
super(IsolateAggregateFilterTest, self).setUp()
self.image_service = nova.tests.unit.image.fake.FakeImageService()
# setting traits to flavors
flavor_body = {'flavor': {'name': 'test_flavor',
'ram': 512,
'vcpus': 1,
'disk': 1
}}
self.flavor_with_trait_dxva = self.api.post_flavor(flavor_body)
self.admin_api.post_extra_spec(
self.flavor_with_trait_dxva['id'],
{'extra_specs': {'trait:HW_GPU_API_DXVA': 'required'}})
flavor_body['flavor']['name'] = 'test_flavor1'
self.flavor_with_trait_sgx = self.api.post_flavor(flavor_body)
self.admin_api.post_extra_spec(
self.flavor_with_trait_sgx['id'],
{'extra_specs': {'trait:HW_CPU_X86_SGX': 'required'}})
self.flavor_without_trait = self.flavors[0]
with nova.utils.temporary_mutation(self.api, microversion='2.35'):
images = self.api.get_images()
self.image_id_without_trait = images[0]['id']
def test_filter_with_no_valid_host(self):
"""Test 'isolate_aggregates' filter with no valid hosts.
No required traits set in image/flavor, so all aggregates with
required traits set should be ignored.
"""
rp_uuid1 = self._get_provider_uuid_by_host('host1')
self._set_provider_traits(
rp_uuid1, ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
self._set_traits_on_aggregate(
'only-host1', ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(
rp_uuid2, ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
self._set_traits_on_aggregate(
'only-host2', ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
server = self._boot_server(
flavor_id=self.flavor_without_trait['id'],
image_id=self.image_id_without_trait,
end_status='ERROR')
self.assertIsNone(self._get_instance_host(server))
self.assertIn('No valid host', server['fault']['message'])
def test_filter_without_trait(self):
"""Test 'isolate_aggregates' filter with valid hosts.
No required traits set in image/flavor so instance should be booted on
host from an aggregate with no required traits set.
"""
rp_uuid1 = self._get_provider_uuid_by_host('host1')
self._set_provider_traits(
rp_uuid1, ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
self._set_traits_on_aggregate(
'only-host1', ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
server = self._boot_server(
flavor_id=self.flavor_without_trait['id'],
image_id=self.image_id_without_trait)
self.assertEqual('host2', self._get_instance_host(server))
def test_filter_with_trait_on_flavor(self):
"""Test filter with matching required traits set only in one aggregate.
Required trait (HW_GPU_API_DXVA) set in flavor so instance should be
booted on host with matching required traits set on aggregates.
"""
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(rp_uuid2, ['HW_GPU_API_DXVA'])
rp_uuid1 = self._get_provider_uuid_by_host('host1')
self._set_provider_traits(
rp_uuid1, ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
self._set_traits_on_aggregate('only-host2', ['HW_GPU_API_DXVA'])
self._set_traits_on_aggregate(
'only-host1', ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
server = self._boot_server(
flavor_id=self.flavor_with_trait_dxva['id'],
image_id=self.image_id_without_trait)
self.assertEqual('host2', self._get_instance_host(server))
def test_filter_with_common_trait_on_aggregates(self):
"""Test filter with common required traits set to aggregates.
Required trait (HW_CPU_X86_SGX) set in flavor so instance should be
booted on host with exact matching required traits set on aggregates.
"""
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(rp_uuid2, ['HW_CPU_X86_SGX'])
rp_uuid1 = self._get_provider_uuid_by_host('host1')
self._set_provider_traits(
rp_uuid1, ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
self._set_traits_on_aggregate('only-host2', ['HW_CPU_X86_SGX'])
self._set_traits_on_aggregate(
'only-host1', ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
server = self._boot_server(
flavor_id=self.flavor_with_trait_sgx['id'],
image_id=self.image_id_without_trait)
self.assertEqual('host2', self._get_instance_host(server))
def test_filter_with_traits_on_image_and_flavor(self):
"""Test filter with common traits set to image/flavor and aggregates.
Required trait (HW_CPU_X86_SGX) set in flavor and
required trait (HW_CPU_X86_VMX) set in image, so instance should be
booted on host with exact matching required traits set on aggregates.
"""
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(
rp_uuid2, ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
rp_uuid1 = self._get_provider_uuid_by_host('host1')
self._set_provider_traits(rp_uuid1, ['HW_GPU_API_DXVA'])
self._set_traits_on_aggregate('only-host1', ['HW_GPU_API_DXVA'])
self._set_traits_on_aggregate(
'only-host2', ['HW_CPU_X86_VMX', 'HW_CPU_X86_SGX'])
# Creating a new image and setting traits on it.
with nova.utils.temporary_mutation(self.api, microversion='2.35'):
self.ctxt = test_utils.get_test_admin_context()
img_ref = self.image_service.create(self.ctxt, {'name': 'image10'})
image_id_with_trait = img_ref['id']
self.addCleanup(
self.image_service.delete, self.ctxt, image_id_with_trait)
self.api.api_put('/images/%s/metadata' % image_id_with_trait,
{'metadata': {
'trait:HW_CPU_X86_VMX': 'required'}})
server = self._boot_server(
flavor_id=self.flavor_with_trait_sgx['id'],
image_id=image_id_with_trait)
self.assertEqual('host2', self._get_instance_host(server))
def test_filter_with_traits_image_flavor_subset_of_aggregates(self):
"""Test filter with image/flavor required traits subset of aggregates.
        Image and flavor have a nonempty set of required traits that is a
        subset of the traits on the aggregates.
"""
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(
rp_uuid2, ['HW_CPU_X86_VMX', 'HW_GPU_API_DXVA', 'HW_CPU_X86_SGX'])
self._set_traits_on_aggregate(
'only-host2',
['HW_CPU_X86_VMX', 'HW_GPU_API_DXVA', 'HW_CPU_X86_SGX'])
# Creating a new image and setting traits on it.
with nova.utils.temporary_mutation(self.api, microversion='2.35'):
self.ctxt = test_utils.get_test_admin_context()
img_ref = self.image_service.create(self.ctxt, {'name': 'image10'})
image_id_with_trait = img_ref['id']
self.addCleanup(
self.image_service.delete, self.ctxt, image_id_with_trait)
self.api.api_put('/images/%s/metadata' % image_id_with_trait,
{'metadata': {
'trait:HW_CPU_X86_VMX': 'required'}})
server = self._boot_server(
flavor_id=self.flavor_with_trait_sgx['id'],
image_id=image_id_with_trait,
end_status='ERROR')
self.assertIsNone(self._get_instance_host(server))
self.assertIn('No valid host', server['fault']['message'])
def test_filter_with_traits_image_flavor_disjoint_of_aggregates(self):
"""Test filter with image/flav required traits disjoint of aggregates.
        Image and flavor have a nonempty set of required traits that is
        disjoint from the traits on the aggregates.
"""
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(rp_uuid2, ['HW_CPU_X86_VMX'])
rp_uuid1 = self._get_provider_uuid_by_host('host1')
self._set_provider_traits(rp_uuid1, ['HW_GPU_API_DXVA'])
self._set_traits_on_aggregate('only-host1', ['HW_GPU_API_DXVA'])
self._set_traits_on_aggregate('only-host2', ['HW_CPU_X86_VMX'])
# Creating a new image and setting traits on it.
with nova.utils.temporary_mutation(self.api, microversion='2.35'):
self.ctxt = test_utils.get_test_admin_context()
img_ref = self.image_service.create(self.ctxt, {'name': 'image10'})
image_id_with_trait = img_ref['id']
self.addCleanup(
self.image_service.delete, self.ctxt, image_id_with_trait)
self.api.api_put('/images/%s/metadata' % image_id_with_trait,
{'metadata': {
'trait:HW_CPU_X86_VMX': 'required'}})
server = self._boot_server(
flavor_id=self.flavor_with_trait_sgx['id'],
image_id=image_id_with_trait,
end_status='ERROR')
self.assertIsNone(self._get_instance_host(server))
self.assertIn('No valid host', server['fault']['message'])
class IsolateAggregateFilterTestWithConcernFilters(IsolateAggregateFilterTest):
def setUp(self):
filters = CONF.filter_scheduler.enabled_filters
        # NOTE(shilpasd): Test the `isolate_aggregates` request filter
        # together with the following filters, which also filter hosts based
        # on aggregate metadata.
if 'AggregateImagePropertiesIsolation' not in filters:
filters.append('AggregateImagePropertiesIsolation')
if 'AggregateInstanceExtraSpecsFilter' not in filters:
filters.append('AggregateInstanceExtraSpecsFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(IsolateAggregateFilterTestWithConcernFilters, self).setUp()
class IsolateAggregateFilterTestWOConcernFilters(IsolateAggregateFilterTest):
def setUp(self):
filters = CONF.filter_scheduler.enabled_filters
        # NOTE(shilpasd): To test the `isolate_aggregates` request filter on
        # its own, remove the following filters, which also filter hosts
        # based on aggregate metadata.
if 'AggregateImagePropertiesIsolation' in filters:
filters.remove('AggregateImagePropertiesIsolation')
if 'AggregateInstanceExtraSpecsFilter' in filters:
filters.remove('AggregateInstanceExtraSpecsFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(IsolateAggregateFilterTestWOConcernFilters, self).setUp()
class TestAggregateFiltersTogether(AggregateRequestFiltersTest):
def setUp(self):
        # Use a custom weigher that would prefer host1 if the forbidden
        # aggregate filter were not in place; otherwise it's not deterministic
        # whether we land on host2 because of the filter or just by chance.
self.useFixture(nova_fixtures.HostNameWeigherFixture())
# NOTE(danms): Do this before calling setUp() so that
# the scheduler service that is started sees the new value
filters = CONF.filter_scheduler.enabled_filters
filters.remove('AvailabilityZoneFilter')
        # NOTE(shilpasd): To test the `isolate_aggregates` request filter,
        # remove the following filters, which also filter hosts based on
        # aggregate metadata.
if 'AggregateImagePropertiesIsolation' in filters:
filters.remove('AggregateImagePropertiesIsolation')
if 'AggregateInstanceExtraSpecsFilter' in filters:
filters.remove('AggregateInstanceExtraSpecsFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(TestAggregateFiltersTogether, self).setUp()
# Default to enabling all filters
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
group='scheduler')
self.flags(query_placement_for_availability_zone=True,
group='scheduler')
self.flags(enable_isolated_aggregate_filtering=True,
group='scheduler')
        # Set a required trait on the test flavor via extra specs.
flavor_body = {'flavor': {'name': 'test_flavor',
'ram': 512,
'vcpus': 1,
'disk': 1
}}
self.flavor_with_trait_dxva = self.api.post_flavor(flavor_body)
self.admin_api.post_extra_spec(
self.flavor_with_trait_dxva['id'],
{'extra_specs': {'trait:HW_GPU_API_DXVA': 'required'}})
def test_tenant_with_az_match(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host1
self._set_az_aggregate('only-host1', 'myaz')
# Boot the server into that az and make sure we land
server = self._boot_server(az='myaz')
self.assertEqual('host1', self._get_instance_host(server))
def test_tenant_with_az_mismatch(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host2
self._set_az_aggregate('only-host2', 'myaz')
# Boot the server into that az and make sure we fail
server = self._boot_server(az='myaz', end_status='ERROR')
self.assertIsNone(self._get_instance_host(server))
def test_tenant_with_az_and_traits_match(self):
# Grant our tenant access to the aggregate with host2
self._grant_tenant_aggregate('only-host2',
[self.api.project_id])
# Set an az on only-host2
self._set_az_aggregate('only-host2', 'myaz')
# Set trait on host2
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(rp_uuid2, ['HW_GPU_API_DXVA'])
# Set trait on aggregate only-host2
self._set_traits_on_aggregate('only-host2', ['HW_GPU_API_DXVA'])
# Boot the server into that az and make sure we land
server = self._boot_server(
flavor_id=self.flavor_with_trait_dxva['id'], az='myaz')
self.assertEqual('host2', self._get_instance_host(server))
def test_tenant_with_az_and_traits_mismatch(self):
# Grant our tenant access to the aggregate with host2
self._grant_tenant_aggregate('only-host2',
[self.api.project_id])
        # Set an az on only-host2
self._set_az_aggregate('only-host2', 'myaz')
# Set trait on host2
rp_uuid2 = self._get_provider_uuid_by_host('host2')
self._set_provider_traits(rp_uuid2, ['HW_CPU_X86_VMX'])
# Set trait on aggregate only-host2
self._set_traits_on_aggregate('only-host2', ['HW_CPU_X86_VMX'])
# Boot the server into that az and make sure we fail
server = self._boot_server(
flavor_id=self.flavor_with_trait_dxva['id'],
az='myaz',
end_status='ERROR')
self.assertIsNone(self._get_instance_host(server))
self.assertIn('No valid host', server['fault']['message'])
class TestAggregateMultiTenancyIsolationFilter(
test.TestCase, integrated_helpers.InstanceHelperMixin):
def _start_compute(self, host):
self.start_service('compute', host=host)
def setUp(self):
super(TestAggregateMultiTenancyIsolationFilter, self).setUp()
# Stub out glance, placement and neutron.
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.useFixture(func_fixtures.PlacementFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
# Start nova services.
self.start_service('conductor')
self.admin_api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
self.api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1',
project_id=uuids.non_admin)).api
# Add the AggregateMultiTenancyIsolation to the list of enabled
# filters since it is not enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
enabled_filters.append('AggregateMultiTenancyIsolation')
self.flags(enabled_filters=enabled_filters, group='filter_scheduler')
self.start_service('scheduler')
for host in ('host1', 'host2'):
self._start_compute(host)
def test_aggregate_multitenancy_isolation_filter(self):
"""Tests common scenarios with the AggregateMultiTenancyIsolation
filter:
* hosts in a tenant-isolated aggregate are only accepted for that
tenant
* hosts not in a tenant-isolated aggregate are acceptable for all
tenants, including tenants with access to the isolated-tenant
aggregate
"""
# Create a tenant-isolated aggregate for the non-admin user.
agg_id = self.admin_api.post_aggregate(
{'aggregate': {'name': 'non_admin_agg'}})['id']
meta_req = {'set_metadata': {
'metadata': {'filter_tenant_id': uuids.non_admin}}}
self.admin_api.api_post('/os-aggregates/%s/action' % agg_id, meta_req)
# Add host2 to the aggregate; we'll restrict host2 to the non-admin
# tenant.
host_req = {'add_host': {'host': 'host2'}}
self.admin_api.api_post('/os-aggregates/%s/action' % agg_id, host_req)
        # Spy on get_filtered_hosts to assert how many host candidates were
        # available per tenant-specific request.
original_filtered_hosts = (
nova.scheduler.host_manager.HostManager.get_filtered_hosts)
def spy_get_filtered_hosts(*args, **kwargs):
self.filtered_hosts = original_filtered_hosts(*args, **kwargs)
return self.filtered_hosts
self.stub_out(
'nova.scheduler.host_manager.HostManager.get_filtered_hosts',
spy_get_filtered_hosts)
# Create a server for the admin - should only have one host candidate.
server_req = {'server': self._build_server(networks='none')}
with utils.temporary_mutation(self.admin_api, microversion='2.37'):
server = self.admin_api.post_server(server_req)
server = self._wait_for_state_change(server, 'ACTIVE')
# Assert it's not on host2 which is isolated to the non-admin tenant.
self.assertNotEqual('host2', server['OS-EXT-SRV-ATTR:host'])
self.assertEqual(1, len(self.filtered_hosts))
# Now create a server for the non-admin tenant to which host2 is
# isolated via the aggregate, but the other compute host is a
# candidate. We don't assert that the non-admin tenant server shows
# up on host2 because the other host, which is not isolated to the
# aggregate, is still a candidate.
server_req = {'server': self._build_server(networks='none')}
with utils.temporary_mutation(self.api, microversion='2.37'):
server = self.api.post_server(server_req)
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(self.filtered_hosts))
class AggregateMultiTenancyIsolationColdMigrateTest(
test.TestCase, integrated_helpers.InstanceHelperMixin):
@staticmethod
def _create_aggregate(admin_api, name):
return admin_api.api_post(
'/os-aggregates', {'aggregate': {'name': name}}).body['aggregate']
@staticmethod
def _add_host_to_aggregate(admin_api, aggregate, host):
add_host_req_body = {
"add_host": {
"host": host
}
}
admin_api.api_post(
'/os-aggregates/%s/action' % aggregate['id'], add_host_req_body)
@staticmethod
def _isolate_aggregate(admin_api, aggregate, tenant_id):
set_meta_req_body = {
"set_metadata": {
"metadata": {
"filter_tenant_id": tenant_id
}
}
}
admin_api.api_post(
'/os-aggregates/%s/action' % aggregate['id'], set_meta_req_body)
def setUp(self):
super(AggregateMultiTenancyIsolationColdMigrateTest, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
# Intentionally keep these separate since we want to create the
# server with the non-admin user in a different project.
admin_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1', project_id=uuids.admin_project))
self.admin_api = admin_api_fixture.admin_api
self.admin_api.microversion = 'latest'
user_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1', project_id=uuids.user_project))
self.api = user_api_fixture.api
self.api.microversion = 'latest'
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.start_service('conductor')
# Enable the AggregateMultiTenancyIsolation filter before starting the
# scheduler service.
enabled_filters = CONF.filter_scheduler.enabled_filters
if 'AggregateMultiTenancyIsolation' not in enabled_filters:
enabled_filters.append('AggregateMultiTenancyIsolation')
self.flags(
enabled_filters=enabled_filters, group='filter_scheduler')
# Add a custom weigher which will weigh host1, which will be in the
# admin project aggregate, higher than the other hosts which are in
# the non-admin project aggregate.
self.useFixture(nova_fixtures.HostNameWeigherFixture())
self.start_service('scheduler')
for host in ('host1', 'host2', 'host3'):
self.start_service('compute', host=host)
# Create an admin-only aggregate for the admin project. This is needed
# because if host1 is not in an aggregate with the filter_tenant_id
# metadata key, the filter will accept that host even for the non-admin
# project.
admin_aggregate = self._create_aggregate(
self.admin_api, 'admin-aggregate')
self._add_host_to_aggregate(self.admin_api, admin_aggregate, 'host1')
# Restrict the admin project to the admin aggregate.
self._isolate_aggregate(
self.admin_api, admin_aggregate, uuids.admin_project)
# Create the tenant aggregate for the non-admin project.
tenant_aggregate = self._create_aggregate(
self.admin_api, 'tenant-aggregate')
# Add two compute hosts to the tenant aggregate. We exclude host1
# since that is weighed higher due to HostNameWeigherFixture and we
# want to ensure the scheduler properly filters out host1 before we
# even get to weighing the selected hosts.
for host in ('host2', 'host3'):
self._add_host_to_aggregate(self.admin_api, tenant_aggregate, host)
# Restrict the non-admin project to the tenant aggregate.
self._isolate_aggregate(
self.admin_api, tenant_aggregate, uuids.user_project)
def test_cold_migrate_server(self):
"""Creates a server using the non-admin project, then cold migrates
the server and asserts the server goes to the other host in the
isolated host aggregate via the AggregateMultiTenancyIsolation filter.
"""
img = nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
server_req_body = self._build_server(
image_uuid=img,
networks='none')
server = self.api.post_server({'server': server_req_body})
server = self._wait_for_state_change(server, 'ACTIVE')
# Ensure the server ended up in host2 or host3
original_host = server['OS-EXT-SRV-ATTR:host']
self.assertNotEqual('host1', original_host)
# Now cold migrate the server and it should end up in the other host
# in the same tenant-isolated aggregate.
self.admin_api.api_post(
'/servers/%s/action' % server['id'], {'migrate': None})
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
# Ensure the server is on the other host in the same aggregate.
expected_host = 'host3' if original_host == 'host2' else 'host2'
self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
|
|
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import Book, BookSigning
def _make_books(n, base_date):
for i in range(n):
Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100 + i,
pubdate=base_date - datetime.timedelta(days=i))
@override_settings(ROOT_URLCONF='generic_views.urls')
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertFalse('latest' in res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
def test_archive_view_custom_sorting(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbyname/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_custom_sorting_dec(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbynamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('-name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
@override_settings(ROOT_URLCONF='generic_views.urls')
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertEqual(res.context['next_year'], None)
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_custom_sort_order(self):
        # Zebras comes after Dreaming by name, but before it on '-pubdate',
        # which is the default ordering.
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/dates/books/2006/sortedbyname/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1), datetime.date(2006, 9, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006).order_by('name')))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006).order_by('name')))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_two_custom_sort_orders(self):
Book.objects.create(name="Zebras for Dummies", pages=300, pubdate=datetime.date(2006, 9, 1))
Book.objects.create(name="Hunting Hippos", pages=400, pubdate=datetime.date(2006, 3, 1))
res = self.client.get('/dates/books/2006/sortedbypageandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 3, 1), datetime.date(2006, 5, 1), datetime.date(2006, 9, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006).order_by('pages', '-name')))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006).order_by('pages', '-name')))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
@override_settings(ROOT_URLCONF='generic_views.urls')
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0], b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month, day in ((9, 1), (10, 2), (11, 3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 9, 1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
@override_settings(ROOT_URLCONF='generic_views.urls')
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
        # allow_empty = True, empty week
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], None)
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(ROOT_URLCONF='generic_views.urls')
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
        # allow_empty = False, empty day
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
        # allow_empty = True, empty day
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
        # Since allow_empty=True, next/prev are allowed to be empty days (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
        # allow_empty but not allow_future: next_day should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
        # allow_future = False, future day
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
        # allow_future = True, valid future day
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
        # allow_future, but not allow_empty, with a current day.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006\n")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
@override_settings(ROOT_URLCONF='generic_views.urls')
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_queryset(self):
"""
Ensure that custom querysets are used when provided to
BaseDateDetailView.get_object()
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_queryset/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_queryset/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
|
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# execsnoop Trace new processes via exec() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: execsnoop [-h] [-t] [-x] [-n NAME]
#
# This currently will print up to a maximum of 19 arguments, plus the process
# name, so 20 fields in total (MAXARG).
#
# This won't catch all new processes: an application may fork() but not exec().
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 07-Feb-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
import argparse
import ctypes as ct
import re
import time
from collections import defaultdict
# arguments
examples = """examples:
./execsnoop # trace all exec() syscalls
./execsnoop -x # include failed exec()s
./execsnoop -t # include timestamps
./execsnoop -n main # only print command lines containing "main"
./execsnoop -l tpkg # only print command where arguments contains "tpkg"
"""
parser = argparse.ArgumentParser(
description="Trace exec() syscalls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-x", "--fails", action="store_true",
help="include failed exec()s")
parser.add_argument("-n", "--name",
help="only print commands matching this name (regex), any arg")
parser.add_argument("-l", "--line",
help="only print commands where arg contains this line (regex)")
args = parser.parse_args()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/fs.h>
#define MAXARG 20
#define ARGSIZE 128
enum event_type {
EVENT_ARG,
EVENT_RET,
};
struct data_t {
u32 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
char comm[TASK_COMM_LEN];
enum event_type type;
char argv[ARGSIZE];
int retval;
};
BPF_PERF_OUTPUT(events);
static int __submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
bpf_probe_read(data->argv, sizeof(data->argv), ptr);
events.perf_submit(ctx, data, sizeof(struct data_t));
return 1;
}
static int submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
const char *argp = NULL;
bpf_probe_read(&argp, sizeof(argp), ptr);
if (argp) {
return __submit_arg(ctx, (void *)(argp), data);
}
return 0;
}
int kprobe__sys_execve(struct pt_regs *ctx, struct filename *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
// create data here and pass to submit_arg to save stack space (#555)
struct data_t data = {};
data.pid = bpf_get_current_pid_tgid() >> 32;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_ARG;
__submit_arg(ctx, (void *)filename, &data);
int i = 1; // skip first arg, as we submitted filename
// unrolled loop to walk argv[] (MAXARG)
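    // NOTE: the walk is unrolled by hand because older kernels' BPF
    // verifier rejected programs containing loops, so each argv slot is
    // submitted with its own guarded call up to MAXARG entries.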
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++; // X
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++; // XX
// handle truncated argument list
char ellipsis[] = "...";
__submit_arg(ctx, (void *)ellipsis, &data);
out:
return 0;
}
int kretprobe__sys_execve(struct pt_regs *ctx)
{
struct data_t data = {};
data.pid = bpf_get_current_pid_tgid() >> 32;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_RET;
data.retval = PT_REGS_RC(ctx);
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
# initialize BPF
b = BPF(text=bpf_text)
# header
if args.timestamp:
print("%-8s" % ("TIME(s)"), end="")
print("%-16s %-6s %-6s %3s %s" % ("PCOMM", "PID", "PPID", "RET", "ARGS"))
TASK_COMM_LEN = 16 # linux/sched.h
ARGSIZE = 128 # should match #define in C above
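# The ctypes layout below mirrors struct data_t in the BPF C program above;
# field order and sizes must match so the perf event bytes decode correctly.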
class Data(ct.Structure):
_fields_ = [
("pid", ct.c_uint),
("comm", ct.c_char * TASK_COMM_LEN),
("type", ct.c_int),
("argv", ct.c_char * ARGSIZE),
("retval", ct.c_int),
]
class EventType(object):
EVENT_ARG = 0
EVENT_RET = 1
start_ts = time.time()
argv = defaultdict(list)
# TODO: This is best-effort PPID matching. Short-lived processes may exit
# before we get a chance to read the PPID. This should be replaced with
# fetching PPID via C when available (#364).
def get_ppid(pid):
try:
with open("/proc/%d/status" % pid) as status:
for line in status:
if line.startswith("PPid:"):
return int(line.split()[1])
except IOError:
pass
return 0
# process event
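# EVENT_ARG records arrive one per argument and are accumulated per PID in
# argv[]; the full command line is printed once the matching EVENT_RET
# record (the exec() return) is seen.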
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
skip = False
if event.type == EventType.EVENT_ARG:
argv[event.pid].append(event.argv)
elif event.type == EventType.EVENT_RET:
        # By default, skip failed exec()s; -x/--fails includes them.
        if event.retval != 0 and not args.fails:
            skip = True
if args.name and not re.search(args.name, event.comm):
skip = True
if args.line and not re.search(args.line,
b' '.join(argv[event.pid]).decode()):
skip = True
if not skip:
if args.timestamp:
print("%-8.3f" % (time.time() - start_ts), end="")
ppid = get_ppid(event.pid)
print("%-16s %-6s %-6s %3s %s" % (event.comm.decode(), event.pid,
ppid if ppid > 0 else "?", event.retval,
b' '.join(argv[event.pid]).decode()))
try:
            del argv[event.pid]
except Exception:
pass
# loop with callback to print_event
b["events"].open_perf_buffer(print_event)
while 1:
b.kprobe_poll()
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle.fluid.framework as framework
from paddle.fluid.optimizer import Optimizer
import paddle.fluid.core as core
import numpy as np
from . import ascend_parser
from paddle.distributed import fleet
import hccl.manage.api as hccl
from collections import namedtuple
HcomGroupConfig = namedtuple('HcomGroupConfig', ['name', 'nranks', 'rank_ids'])
__all__ = []
class AscendIRParser(object):
def __init__(self, auto_dp=False, world_rank_size=1):
self.graph_idx = 0
self.hcom_endpoints = {}
self.groups_to_create = []
self._auto_dp = auto_dp
self._world_rank_size = world_rank_size
def _construct_input_map(self, input_varlist):
ret_map = {}
ge_in_operator = []
for id, var in enumerate(input_varlist):
if var.is_data: # input data
ge_input = core.GEOperatorFactory.create_operator(
var.name, "Data").set_attr_int32("index", id)
ret_map[var.name] = ge_input
ge_in_operator.append(ge_input)
else: # param, learning ...
ge_input = core.GEOperatorFactory.create_operator(var.name,
"Variable")
ge_input.update_output_desc("y",
core.GETensorDesc(
core.GEShape(var.shape),
core.GEFormat.FORMAT_ND,
core.GEDataType.DT_FLOAT))
ret_map[var.name] = ge_input
return ge_in_operator, ret_map
def _endpoint_to_world_rank_id(self, endpoint):
world_endpoints = fleet.worker_endpoints()
        assert endpoint in world_endpoints, "endpoint (%s) not in worker_endpoints (%s) " % (
            endpoint, world_endpoints)
return world_endpoints.index(endpoint)
def parse_op(self, op):
if op.type == 'c_gen_nccl_id':
endpoint = op.attr("endpoint")
other_endpoints = op.attr("other_endpoints")
rank = op.attr("rank")
nccl_id = op.output_arg_names[0]
            # The c_gen_nccl_id operator splits the endpoints into the local
            # endpoint and other_endpoints; combine them here to produce the
            # full, rank-ordered endpoint list used for world_rank_ids.
self.hcom_endpoints[nccl_id] = other_endpoints[:]
self.hcom_endpoints[nccl_id].insert(rank, endpoint)
print("nccl_id (%s) registered endpoints %s" %
(nccl_id, self.hcom_endpoints[nccl_id]))
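            # Worked example (hypothetical values): with rank=1,
            # endpoint="10.0.0.2:6071" and
            # other_endpoints=["10.0.0.1:6070", "10.0.0.3:6072"], the insert
            # above yields hcom_endpoints[nccl_id] ==
            #     ["10.0.0.1:6070", "10.0.0.2:6071", "10.0.0.3:6072"],
            # i.e. the full endpoint list ordered by rank.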
elif op.type == 'c_comm_init':
nccl_id = op.input_arg_names[0]
nranks = op.attr("nranks")
assert nranks == len(self.hcom_endpoints[
nccl_id]), "nranks doesn't match endpoint count"
rank = op.attr("rank")
ring_id = op.attr("ring_id")
group_name = "hcom_group_" + str(ring_id)
global_rank_ids = [
self._endpoint_to_world_rank_id(endpoint)
for endpoint in self.hcom_endpoints[nccl_id]
]
self.groups_to_create.append(
HcomGroupConfig(
name=group_name, nranks=nranks, rank_ids=global_rank_ids))
print("append to create group: %s, with rank_ids: %s" %
(group_name, global_rank_ids))
elif op.type in ascend_parser.registerd_op:
op_parser = self.parser_factory.create_parse(
ascend_parser.registerd_op[op.type])
op_parser.apply(op)
else:
            assert False, "Op[%s] has not been registered, so it cannot be parsed" % (
                op.type)
def _parse_program(self,
graph_name,
program,
input_varlist=[],
fetch_list=[]):
begin_graph_idx = self.graph_idx
ge_in_operator = []
ge_out_operator = []
self.var2geop = {}
block = program.global_block()
if len(block.ops) == 0:
            print("There are no ops in program %s" % (graph_name))
return []
graph = core.GEGraph(graph_name)
ge_in_operator, self.var2geop = self._construct_input_map(input_varlist)
self.parser_factory = ascend_parser.AscendParserFactory(graph,
self.var2geop)
        for curop in list(block.ops):
self.parse_op(curop)
# Set fetch_var for GE
for e in fetch_list:
name = e
if not isinstance(e, str):
name = e.name
ge_out_operator.append(self.var2geop[name])
        # (Debug) To print back-prop vars, append the varname to ge_out_operator here, e.g.:
# if graph_name == "main":
# ge_out_operator.append(self.var2geop["reduce_sum_0.tmp_0@GRAD"])
        # Add ops that may be inputs of the graph, such as const.
for varname, geop in self.var2geop.items():
if varname.startswith("geinput"):
ge_in_operator.append(geop)
graph.set_inputs(ge_in_operator).set_outputs(ge_out_operator)
# Remove ops of origin program
op_num = len(block.ops)
for i in range(op_num - 1, -1, -1):
block._remove_op(i)
input_varlist = [var for var in input_varlist if var.is_data]
block.append_op(
type="ascend_trigger",
inputs={"FeedList": input_varlist},
outputs={"FetchList": fetch_list},
attrs={'graph_idx': self.graph_idx})
self.graph_idx += 1
return graph
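    # Note: after _parse_program returns, the original Paddle block has been
    # reduced to a single `ascend_trigger` op whose `graph_idx` attribute
    # selects the GE graph built above, while FeedList/FetchList reconnect it
    # to the original program's inputs and outputs.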
def parse_program(self, startup_program, main_program, input_varlist,
fetch_list):
startup_graph = self._parse_program("startup", startup_program)
main_graph = self._parse_program("main", main_program, input_varlist,
fetch_list)
if self._auto_dp and self._world_rank_size > 1:
assert len(self.groups_to_create
) == 0, "can't parse program under auto_dp mode"
from paddle.distributed import fleet
self.groups_to_create.append(
HcomGroupConfig(
name="hcom_group_0",
nranks=fleet.world_size(),
rank_ids=[x for x in range(fleet.world_size())]))
return startup_graph, main_graph
# AscendOptimizer is currently a wrapper around a basic optimizer.
# We will make it part of the fleet meta_optimizer in the future.
class AscendOptimizer(Optimizer):
def __init__(self, optimizer, fetch_list=[]):
self.inner_opt = optimizer
self.fetch_list = fetch_list
self.ascend_instance = None
def __del__(self):
print("begin AscendOptimizer del")
if self.ascend_instance is not None:
self.ascend_instance.destroy_global_resources()
core.ge_finalize()
print("end AscendOptimizer del")
def _can_apply(self):
if not self.user_defined_strategy.ascend:
return False
# TODO(hutuxian): other check here
return True
def _disable_strategy(self, dist_strategy):
dist_strategy.ascend = False
dist_strategy.ascend_configs = {}
def _get_input_varlist(self, program):
ret_list = []
for var in program.list_vars():
if var.is_data or var.persistable:
ret_list.append(var)
return ret_list
def minimize(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
auto_dp=False,
rank_table_file=None,
precision_mode="must_keep_origin_dtype"):
minimized = None
if self.inner_opt:
minimized = self.inner_opt.minimize(
loss, startup_program=startup_program)
self.ascend_instance = core.AscendInstance()
from paddle.distributed import fleet
if auto_dp and fleet.world_size() > 1:
from paddle.fluid.transpiler import ascend_transpiler
t = ascend_transpiler.AscendTranspiler(startup_program,
loss.block.program)
t.transpile()
#print(loss.block.program)
        # Config options for the Graph Engine are documented at https://support.huaweicloud.com/
config = {
"ge.exec.deviceId": str(fleet.local_device_ids()),
"ge.graphRunMode": "1",
"ge.exec.precision_mode": precision_mode,
}
        # if there are multiple trainers
if rank_table_file and fleet.world_size() > 1:
config["ge.exec.rankTableFile"] = rank_table_file
config["ge.exec.rankId"] = str(fleet.worker_index())
config["ge.exec.isUseHcom"] = "1"
config["ge.exec.deployMode"] = "0"
print("ge_initialize config:", config)
core.ge_initialize(config)
# Init Session
self.ascend_instance.init_global_resources()
main_block = loss.block
self.parser = AscendIRParser(
auto_dp=auto_dp, world_rank_size=fleet.world_size())
input_varlist = self._get_input_varlist(main_block.program)
startup_graph, main_graph = self.parser.parse_program(
startup_program, main_block.program, input_varlist, self.fetch_list)
for cfg in self.parser.groups_to_create:
print("create group (%s), nranks: %d, rank_ids: %s" %
(cfg.name, cfg.nranks, cfg.rank_ids))
hccl.create_group(cfg.name, cfg.nranks, cfg.rank_ids)
self.ascend_instance.add_ascend_subgraph(0, startup_graph)
self.ascend_instance.add_ascend_subgraph(1, main_graph)
return minimized
|
|
from collections import namedtuple
import os
import os.path as path
import gzip
import json
import socket
import sys
import urllib2
from nflgame import OrderedDict
import nflgame.player
import nflgame.schedule
import nflgame.seq
import nflgame.statmap
_MAX_INT = sys.maxint
_jsonf = path.join(path.split(__file__)[0], 'gamecenter-json', '%s.json.gz')
_json_base_url = "http://www.nfl.com/liveupdate/game-center/%s/%s_gtd.json"
GameDiff = namedtuple('GameDiff', ['before', 'after', 'plays', 'players'])
"""
Represents the difference between two points in time of the same game
in terms of plays and player statistics.
"""
TeamStats = namedtuple('TeamStats',
['first_downs', 'total_yds', 'passing_yds',
'rushing_yds', 'penalty_cnt', 'penalty_yds',
'turnovers', 'punt_cnt', 'punt_yds', 'punt_avg',
'pos_time'])
"""A collection of team statistics for an entire game."""
class FieldPosition (object):
"""
Represents field position.
The representation here is an integer offset where the 50 yard line
    corresponds to '0'. Being in one's own territory corresponds to a negative
offset while being in the opponent's territory corresponds to a positive
offset.
e.g., NE has the ball on the NE 45, the offset is -5.
e.g., NE has the ball on the NYG 2, the offset is 48.
This representation allows for gains in any particular play to be added
to the field offset to get the new field position as the result of the
play.
"""
def __new__(cls, pos_team=None, yardline=None, offset=None):
if not yardline and offset is None:
return None
return object.__new__(cls)
def __init__(self, pos_team=None, yardline=None, offset=None):
"""
pos_team is the team on offense, and yardline is a string formatted
like 'team-territory yard-line'. e.g., "NE 32".
An offset can be given directly by specifying an integer for offset.
"""
if isinstance(offset, int):
self.offset = offset
return
if yardline == '50':
self.offset = 0
return
territory, yd_str = yardline.split()
yd = int(yd_str)
if territory == pos_team:
self.offset = -(50 - yd)
else:
self.offset = 50 - yd
def __cmp__(self, other):
if isinstance(other, int):
return cmp(self.offset, other)
return cmp(self.offset, other.offset)
def __str__(self):
if self.offset > 0:
return 'OPP %d' % (50 - self.offset)
elif self.offset < 0:
return 'OWN %d' % (50 + self.offset)
else:
return 'MIDFIELD'
def add_yards(self, yards):
"""
Returns a new field position with the yards added to self.
Yards may be negative.
"""
newoffset = max(-50, min(50, self.offset + yards))
return FieldPosition(offset=newoffset)
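# A minimal usage sketch (not from the library docs): offsets are measured
# relative to midfield, so
#
#   FieldPosition('NE', 'NE 45').offset             # == -5 (own 45)
#   FieldPosition('NE', 'NYG 2').offset             # == 48 (opponent's 2)
#   FieldPosition(offset=-5).add_yards(20).offset   # == 15
#
# add_yards() clamps the resulting offset to the range [-50, 50].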
class PossessionTime (object):
"""
Represents the amount of time a drive lasted in (minutes, seconds).
"""
def __init__(self, clock):
self.clock = clock
try:
self.minutes, self.seconds = map(int, self.clock.split(':'))
except ValueError:
self.minutes, self.seconds = 0, 0
def total_seconds(self):
"""
Returns the total number of seconds that this possession lasted for.
"""
return self.seconds + self.minutes * 60
def __cmp__(self, other):
a, b = (self.minutes, self.seconds), (other.minutes, other.seconds)
return cmp(a, b)
def __add__(self, other):
new_time = PossessionTime('0:00')
total_seconds = self.total_seconds() + other.total_seconds()
new_time.minutes = total_seconds / 60
new_time.seconds = total_seconds % 60
new_time.clock = '%.2d:%.2d' % (new_time.minutes, new_time.seconds)
return new_time
def __sub__(self, other):
assert self >= other
new_time = PossessionTime('0:00')
total_seconds = self.total_seconds() - other.total_seconds()
new_time.minutes = total_seconds / 60
new_time.seconds = total_seconds % 60
new_time.clock = '%.2d:%.2d' % (new_time.minutes, new_time.seconds)
return new_time
def __str__(self):
return self.clock
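# A rough sketch of PossessionTime arithmetic (illustrative values only):
#
#   PossessionTime('3:30').total_seconds()                   # == 210
#   (PossessionTime('3:30') + PossessionTime('2:45')).clock  # == '06:15'
#   (PossessionTime('3:30') - PossessionTime('1:15')).clock  # == '02:15'
#
# Comparisons use (minutes, seconds) tuples, so '10:00' sorts after '9:59'.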
class GameClock (object):
"""
Represents the current time in a game. Namely, it keeps track of the
    quarter and clock time. GameClock can also represent whether the game
    hasn't started yet, is at halftime, or is over.
"""
def __init__(self, qtr, clock):
self.qtr = qtr
self.clock = clock
try:
self._minutes, self._seconds = map(int, self.clock.split(':'))
except ValueError:
self._minutes, self._seconds = 0, 0
except AttributeError:
self._minutes, self._seconds = 0, 0
try:
self.__qtr = int(self.qtr)
if self.__qtr >= 3:
self.__qtr += 1 # Let halftime be quarter 3
except ValueError:
if self.is_pregame():
self.__qtr = 0
elif self.is_halftime():
self.__qtr = 3
elif self.is_final():
self.__qtr = sys.maxint
else:
assert False, 'Unknown QTR value: "%s"' % self.qtr
@property
def quarter(self):
return self.__qtr
@quarter.setter
def quarter(self, value):
if isinstance(value, int):
assert value >= 0 and value <= 4
self.qtr = str(value)
self.__qtr = value
else:
self.qtr = value
self.__qtr = 0
def is_pregame(self):
return self.qtr == 'Pregame'
def is_halftime(self):
return self.qtr == 'Halftime'
def is_final(self):
return 'final' in self.qtr.lower()
def __cmp__(self, other):
if self.__qtr != other.__qtr:
return cmp(self.__qtr, other.__qtr)
elif self._minutes != other._minutes:
return cmp(other._minutes, self._minutes)
return cmp(other._seconds, self._seconds)
def __str__(self):
"""
Returns a nicely formatted string indicating the current time of the
game. Examples include "Q1 10:52", "Q4 1:25", "Pregame", "Halftime"
and "Final".
"""
try:
q = int(self.qtr)
return 'Q%d %s' % (q, self.clock)
except ValueError:
return self.qtr
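# A rough sketch of the internal quarter ordering derived from the constructor
# above (illustrative only):
#
#   'Pregame' -> 0,  Q1 -> 1,  Q2 -> 2,  'Halftime' -> 3,
#   Q3 -> 4,  Q4 -> 5,  'Final' -> sys.maxint
#
# Within the same quarter, a larger clock value compares as earlier in the game.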
class Game (object):
"""
Game represents a single pre- or regular-season game. It provides a window
    into the statistics of every player that played in the game, along with
the winner of the game, the score and a list of all the scoring plays.
"""
def __new__(cls, eid=None, fpath=None):
        # If we can't get valid JSON data, return None.
try:
rawData = _get_json_data(eid, fpath)
except urllib2.URLError:
return None
if rawData is None or rawData.strip() == '{}':
return None
game = object.__new__(cls)
game.rawData = rawData
try:
if eid is not None:
game.eid = eid
game.data = json.loads(game.rawData)[game.eid]
else: # For when we have rawData (fpath) and no eid.
game.eid = None
game.data = json.loads(game.rawData)
for k, v in game.data.iteritems():
if isinstance(v, dict):
game.eid = k
game.data = v
break
assert game.eid is not None
except ValueError:
return None
return game
def __init__(self, eid=None, fpath=None):
"""
Creates a new Game instance given a game identifier.
The game identifier is used by NFL.com's GameCenter live update web
pages. It is used to construct a URL to download JSON data for the
game.
If the game has been completed, the JSON data will be cached to disk
so that subsequent accesses will not re-download the data but instead
read it from disk.
When the JSON data is written to disk, it is compressed using gzip.
"""
# Make the schedule info more accessible.
self.schedule = nflgame.schedule.games_byid.get(self.eid, None)
        # Home and away cumulative team statistics.
self.home = self.data['home']['abbr']
self.away = self.data['away']['abbr']
self.stats_home = _json_team_stats(self.data['home']['stats']['team'])
self.stats_away = _json_team_stats(self.data['away']['stats']['team'])
# Load up some simple static values.
self.gamekey = nflgame.schedule.games_byid[self.eid]['gamekey']
self.time = GameClock(self.data['qtr'], self.data['clock'])
self.down = _tryint(self.data['down'])
self.togo = _tryint(self.data['togo'])
self.score_home = int(self.data['home']['score']['T'])
self.score_away = int(self.data['away']['score']['T'])
for q in (1, 2, 3, 4, 5):
for team in ('home', 'away'):
score = self.data[team]['score'][str(q)]
self.__dict__['score_%s_q%d' % (team, q)] = int(score)
if not self.game_over():
self.winner = None
else:
if self.score_home > self.score_away:
self.winner = self.home
self.loser = self.away
elif self.score_away > self.score_home:
self.winner = self.away
self.loser = self.home
else:
self.winner = '%s/%s' % (self.home, self.away)
self.loser = '%s/%s' % (self.home, self.away)
# Load the scoring summary into a simple list of strings.
self.scores = []
for k in sorted(map(int, self.data['scrsummary'])):
play = self.data['scrsummary'][str(k)]
s = '%s - Q%d - %s - %s' \
% (play['team'], play['qtr'], play['type'], play['desc'])
self.scores.append(s)
# Check to see if the game is over, and if so, cache the data.
if self.game_over() and not os.access(_jsonf % eid, os.R_OK):
self.save()
def is_home(self, team):
"""Returns true if team (i.e., 'NE') is the home team."""
return team == self.home
def season(self):
"""Returns the year of the season this game belongs to."""
year = int(self.eid[0:4])
month = int(self.eid[4:6])
if month <= 3:
year -= 1
return year
def game_over(self):
"""game_over returns true if the game is no longer being played."""
return self.time.is_final()
def playing(self):
"""playing returns true if the game is currently being played."""
return not self.time.is_pregame() and not self.time.is_final()
def save(self, fpath=None):
"""
Save the JSON data to fpath. This is done automatically if the
game is over.
"""
if fpath is None:
fpath = _jsonf % self.eid
try:
print >> gzip.open(fpath, 'w+'), self.rawData,
except IOError:
print >> sys.stderr, "Could not cache JSON data. Please " \
"make '%s' writable." \
% os.path.dirname(fpath)
def nice_score(self):
"""
Returns a string of the score of the game.
        e.g., "NYG (0) at NE (32)".
"""
return '%s (%d) at %s (%d)' \
% (self.away, self.score_away, self.home, self.score_home)
def max_player_stats(self):
"""
Returns a GenPlayers sequence of player statistics that combines
game statistics and play statistics by taking the max value of
each corresponding statistic.
This is useful when accuracy is desirable. Namely, using only
play-by-play data or using only game statistics can be unreliable.
That is, both are inconsistently correct.
        Taking the max value of each statistic reduces the chance of being
        wrong (particularly for stats that appear in both play-by-play data
        and game statistics), but does not eliminate it.
"""
game_players = list(self.players)
play_players = list(self.drives.plays().players())
max_players = OrderedDict()
# So this is a little tricky. It's possible for a player to have
# only statistics at the play level, and therefore not be represented
# in the game level statistics. Therefore, we initialize our
# max_players with play-by-play stats first. Then go back through
# and combine them with available game statistics.
for pplay in play_players:
newp = nflgame.player.GamePlayerStats(pplay.playerid,
pplay.name, pplay.home,
pplay.team)
maxstats = {}
for stat, val in pplay._stats.iteritems():
maxstats[stat] = val
newp._overwrite_stats(maxstats)
max_players[pplay.playerid] = newp
for newp in max_players.itervalues():
for pgame in game_players:
if pgame.playerid != newp.playerid:
continue
maxstats = {}
for stat, val in pgame._stats.iteritems():
maxstats[stat] = max([val,
newp._stats.get(stat, -_MAX_INT)])
newp._overwrite_stats(maxstats)
break
return nflgame.seq.GenPlayerStats(max_players)
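    # Illustrative only (assuming _overwrite_stats updates stats in place): if
    # play-by-play data credits a player with {'passing_yds': 210} while the
    # game-level stats report {'passing_yds': 205, 'passing_tds': 2},
    # max_player_stats() keeps {'passing_yds': 210, 'passing_tds': 2}, the
    # per-stat maximum of the two sources; stats present in only one source
    # pass through unchanged.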
def __getattr__(self, name):
if name == 'players':
self.__players = _json_game_player_stats(self, self.data)
self.players = nflgame.seq.GenPlayerStats(self.__players)
return self.players
if name == 'drives':
self.__drives = _json_drives(self, self.home, self.data['drives'])
self.drives = nflgame.seq.GenDrives(self.__drives)
return self.drives
        raise AttributeError(name)
def __sub__(self, other):
return diff(other, self)
def __str__(self):
return self.nice_score()
def diff(before, after):
"""
Returns the difference between two points of time in a game in terms of
plays and player statistics. The return value is a GameDiff namedtuple
with two attributes: plays and players. Each contains *only* the data
that is in the after game but not in the before game.
This is useful for sending alerts where you're guaranteed to see each
play statistic only once (assuming NFL.com behaves itself).
"""
assert after.eid == before.eid
plays = []
after_plays = list(after.drives.plays())
before_plays = list(before.drives.plays())
for play in after_plays:
if play not in before_plays:
plays.append(play)
# You might think that updated play data is enough. You could scan
# it for statistics you're looking for (like touchdowns).
# But sometimes a play can sneak in twice if its description gets
# updated (late call? play review? etc.)
# Thus, we do a diff on the play statistics for player data too.
_players = OrderedDict()
after_players = list(after.max_player_stats())
before_players = list(before.max_player_stats())
for aplayer in after_players:
has_before = False
for bplayer in before_players:
if aplayer.playerid == bplayer.playerid:
has_before = True
pdiff = aplayer - bplayer
if pdiff is not None:
_players[aplayer.playerid] = pdiff
if not has_before:
_players[aplayer.playerid] = aplayer
players = nflgame.seq.GenPlayerStats(_players)
return GameDiff(before=before, after=after, plays=plays, players=players)
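# A minimal usage sketch (hypothetical game id) showing how diff() is meant to
# be consumed for live updates:
#
#   before = nflgame.game.Game('2013090800')
#   # ... wait for NFL.com to publish new data ...
#   after = nflgame.game.Game('2013090800')
#   for play in diff(before, after).plays:
#       print play   # only plays that are new since `before`
#
# The same result can be obtained with the subtraction operator: after - before.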
class Drive (object):
"""
Drive represents a single drive in an NFL game. It contains a list
of all plays that happened in the drive, in chronological order.
It also contains meta information about the drive such as the start
and stop times and field position, length of possession, the number
of first downs and a short descriptive string of the result of the
drive.
"""
def __init__(self, game, drive_num, home_team, data):
if data is None or 'plays' not in data or len(data['plays']) == 0:
return
self.game = game
self.drive_num = drive_num
self.team = data['posteam']
self.home = self.team == home_team
self.first_downs = int(data['fds'])
self.result = data['result']
self.penalty_yds = int(data['penyds'])
self.total_yds = int(data['ydsgained'])
self.pos_time = PossessionTime(data['postime'])
self.play_cnt = int(data['numplays'])
self.field_start = FieldPosition(self.team, data['start']['yrdln'])
self.time_start = GameClock(data['start']['qtr'],
data['start']['time'])
# When the game is over, the yardline isn't reported. So find the
# last play that does report a yardline.
if data['end']['yrdln'].strip():
self.field_end = FieldPosition(self.team, data['end']['yrdln'])
else:
self.field_end = None
playids = sorted(map(int, data['plays'].keys()), reverse=True)
for pid in playids:
yrdln = data['plays'][str(pid)]['yrdln'].strip()
if yrdln:
self.field_end = FieldPosition(self.team, yrdln)
break
if self.field_end is None:
self.field_end = FieldPosition(self.team, '50')
# When a drive lasts from Q1 to Q2 or Q3 to Q4, the 'end' doesn't
# seem to change to the proper quarter. So scan all of the plays
# and use the maximal quarter listed. (Just taking the last doesn't
# seem to always work.)
# lastplayid = str(max(map(int, data['plays'].keys())))
# endqtr = data['plays'][lastplayid]['qtr']
qtrs = [p['qtr'] for p in data['plays'].values()]
maxq = str(max(map(int, qtrs)))
self.time_end = GameClock(maxq, data['end']['time'])
# One last sanity check. If the end time is less than the start time,
# then bump the quarter if it seems reasonable.
# This technique will blow up if a drive lasts more than fifteen
# minutes and the quarter numbering is messed up.
if self.time_end <= self.time_start \
and self.time_end.quarter in (1, 3):
self.time_end.quarter += 1
self.__plays = _json_plays(self, data['plays'])
self.plays = nflgame.seq.GenPlays(self.__plays)
def __add__(self, other):
"""
Adds the statistics of two drives together.
Note that once two drives are added, the following fields
automatically get None values: result, field_start, field_end,
time_start and time_end.
"""
assert self.team == other.team, \
'Cannot add drives from different teams "%s" and "%s".' \
% (self.team, other.team)
new_drive = Drive(None, 0, '', None)
new_drive.team = self.team
new_drive.home = self.home
new_drive.first_downs = self.first_downs + other.first_downs
new_drive.penalty_yds = self.penalty_yds + other.penalty_yds
new_drive.total_yds = self.total_yds + other.total_yds
new_drive.pos_time = self.pos_time + other.pos_time
new_drive.play_cnt = self.play_cnt + other.play_cnt
new_drive.__plays = self.__plays + other.__plays
new_drive.result = None
new_drive.field_start = None
new_drive.field_end = None
new_drive.time_start = None
new_drive.time_end = None
return new_drive
def __str__(self):
return '%s (Start: %s, End: %s) %s' \
% (self.team, self.time_start, self.time_end, self.result)
class Play (object):
"""
Play represents a single play. It contains a list of all players
that participated in the play (including offense, defense and special
teams). The play also includes meta information about what down it
is, field position, clock time, etc.
Play objects also contain team-level statistics, such as whether the
play was a first down, a fourth down failure, etc.
"""
def __init__(self, drive, playid, data):
self.data = data
self.drive = drive
self.playid = playid
self.team = data['posteam']
self.home = self.drive.home
self.desc = data['desc']
self.note = data['note']
self.down = int(data['down'])
self.yards_togo = int(data['ydstogo'])
self.touchdown = 'touchdown' in self.desc.lower()
self._stats = {}
if not self.team:
self.time, self.yardline = None, None
else:
self.time = GameClock(data['qtr'], data['time'])
self.yardline = FieldPosition(self.team, data['yrdln'])
# Load team statistics directly into the Play instance.
# Things like third down attempts, first downs, etc.
if '0' in data['players']:
for info in data['players']['0']:
if info['statId'] not in nflgame.statmap.idmap:
continue
statvals = nflgame.statmap.values(info['statId'],
info['yards'])
for k, v in statvals.iteritems():
v = self.__dict__.get(k, 0) + v
self.__dict__[k] = v
self._stats[k] = v
# Load the sequence of "events" in a play into a list of dictionaries.
self.events = _json_play_events(data['players'])
# Now load cumulative player data for this play into
# a GenPlayerStats generator. We then flatten this data
# and add it to the play itself so that plays can be
        # filtered by these statistics.
self.__players = _json_play_players(self, data['players'])
self.players = nflgame.seq.GenPlayerStats(self.__players)
for p in self.players:
for k, v in p.stats.iteritems():
# Sometimes we may see duplicate statistics (like tackle
# assists). Let's just overwrite in this case, since this
# data is from the perspective of the play. i.e., there
# is one assisted tackle rather than two.
self.__dict__[k] = v
self._stats[k] = v
def has_player(self, playerid):
"""Whether a player with id playerid participated in this play."""
return playerid in self.__players
def __str__(self):
if self.team:
if self.down != 0:
return '(%s, %s, Q%d, %d and %d) %s' \
% (self.team, self.data['yrdln'], self.time.qtr,
self.down, self.yards_togo, self.desc)
else:
return '(%s, %s, Q%d) %s' \
% (self.team, self.data['yrdln'], self.time.qtr,
self.desc)
return self.desc
def __eq__(self, other):
"""
We use the play description to determine equality because the
play description can be changed. (Like when a play is reversed.)
"""
return self.playid == other.playid and self.desc == other.desc
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError
return 0
def _json_team_stats(data):
"""
Takes a team stats JSON entry and converts it to a TeamStats namedtuple.
"""
return TeamStats(
first_downs=int(data['totfd']),
total_yds=int(data['totyds']),
passing_yds=int(data['pyds']),
rushing_yds=int(data['ryds']),
penalty_cnt=int(data['pen']),
penalty_yds=int(data['penyds']),
turnovers=int(data['trnovr']),
punt_cnt=int(data['pt']),
punt_yds=int(data['ptyds']),
punt_avg=int(data['ptavg']),
pos_time=PossessionTime(data['top']))
def _json_drives(game, home_team, data):
"""
Takes a home or away JSON entry and converts it to a list of Drive
objects.
"""
drive_nums = []
for drive_num in data:
try:
drive_nums.append(int(drive_num))
        except ValueError:
pass
drives = []
for i, drive_num in enumerate(sorted(drive_nums), 1):
d = Drive(game, i, home_team, data[str(drive_num)])
if not hasattr(d, 'game'): # not a valid drive
continue
drives.append(d)
return drives
def _json_plays(drive, data):
"""
Takes a single JSON drive entry (data) and converts it to a list
of Play objects. This includes trying to resolve duplicate play
conflicts by only taking the first instance of a play.
"""
plays = []
seen_ids = set()
seen_desc = set() # Sometimes duplicates have different play ids...
for playid in map(str, sorted(map(int, data))):
p = data[playid]
desc = (p['desc'], p['time'], p['yrdln'], p['qtr'])
if playid in seen_ids or desc in seen_desc:
continue
seen_ids.add(playid)
seen_desc.add(desc)
plays.append(Play(drive, playid, data[playid]))
return plays
def _json_play_players(play, data):
"""
Takes a single JSON play entry (data) and converts it to an OrderedDict
of player statistics.
play is the instance of Play that this data is part of. It is used
    to determine whether the player belongs to the home team or not.
"""
players = OrderedDict()
for playerid, statcats in data.iteritems():
if playerid == '0':
continue
for info in statcats:
if info['statId'] not in nflgame.statmap.idmap:
continue
if playerid not in players:
home = play.drive.game.is_home(info['clubcode'])
if home:
team_name = play.drive.game.home
else:
team_name = play.drive.game.away
stats = nflgame.player.PlayPlayerStats(playerid,
info['playerName'],
home, team_name)
players[playerid] = stats
statvals = nflgame.statmap.values(info['statId'], info['yards'])
players[playerid]._add_stats(statvals)
return players
def _json_play_events(data):
"""
Takes a single JSON play entry (data) and converts it to a list of events.
"""
temp = list()
for playerid, statcats in data.iteritems():
for info in statcats:
if info['statId'] not in nflgame.statmap.idmap:
continue
statvals = nflgame.statmap.values(info['statId'], info['yards'])
statvals['playerid'] = None if playerid == '0' else playerid
statvals['playername'] = info['playerName'] or None
statvals['team'] = info['clubcode']
temp.append((int(info['sequence']), statvals))
return [t[1] for t in sorted(temp, key=lambda t: t[0])]
def _json_game_player_stats(game, data):
"""
Parses the 'home' and 'away' team stats and returns an OrderedDict
mapping player id to their total game statistics as instances of
nflgame.player.GamePlayerStats.
"""
players = OrderedDict()
for team in ('home', 'away'):
for category in nflgame.statmap.categories:
if category not in data[team]['stats']:
continue
for pid, raw in data[team]['stats'][category].iteritems():
stats = {}
for k, v in raw.iteritems():
if k == 'name':
continue
stats['%s_%s' % (category, k)] = v
if pid not in players:
home = team == 'home'
if home:
team_name = game.home
else:
team_name = game.away
players[pid] = nflgame.player.GamePlayerStats(pid,
raw['name'],
home,
team_name)
players[pid]._add_stats(stats)
return players
def _get_json_data(eid=None, fpath=None):
"""
Returns the JSON data corresponding to the game represented by eid.
If the JSON data is already on disk, it is read, decompressed and returned.
Otherwise, the JSON data is downloaded from the NFL web site. If the data
doesn't exist yet or there was an error, _get_json_data returns None.
If eid is None, then the JSON data is read from the file at fpath.
"""
assert eid is not None or fpath is not None
if fpath is not None:
return gzip.open(fpath).read()
fpath = _jsonf % eid
if os.access(fpath, os.R_OK):
return gzip.open(fpath).read()
try:
return urllib2.urlopen(_json_base_url % (eid, eid), timeout=5).read()
except urllib2.HTTPError:
pass
except socket.timeout:
pass
return None
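# Illustrative only (hypothetical eid): for eid '2013090800', _get_json_data
# first checks the cache file
#   <package dir>/gamecenter-json/2013090800.json.gz
# and otherwise downloads
#   http://www.nfl.com/liveupdate/game-center/2013090800/2013090800_gtd.json
# returning None on HTTP errors or socket timeouts.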
def _tryint(v):
"""
Tries to convert v to an integer. If it fails, return 0.
"""
try:
return int(v)
    except (ValueError, TypeError):
return 0
|
|
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.compat import product as cart_product
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(compat.builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(compat.builtins.sum)
result2 = grouped.apply(compat.builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in compat.iteritems(grp_exp):
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
def test_fill_consistency():
# GH9221
    # keyword arguments passed through to the generated wrapper
    # are only set if the passed kw is None
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
    # if the cumulative product overflows, groupby cumprod casts to float,
    # while numpy silently passes back invalid (wrapped) values
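    # Rough illustration of the comment above: with 100 rows of 2, the running
    # product reaches 2 ** 100, which does not fit in int64, so the groupby
    # result is float while a plain int64 cumprod would silently wrap around.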
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(compat.StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
@pytest.mark.parametrize("func", [
'mean', 'var', 'std', 'cumprod', 'cumsum'
])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
    # abs val decreasing (F), non-strictly decreasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby('k').describe()
with pytest.raises(ValueError, match=msg):
df2.groupby('key').describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object():
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper():
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
Timestamp('2016-06-28 16:09:30'),
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']}).set_index('time')
result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
expected = test.groupby(
pd.Grouper(freq='h')
)['data'].apply(pd.Series.nunique)
tm.assert_series_equal(result, expected)
# count
# --------------------------------
def test_groupby_timedelta_cython_count():
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_count():
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, np.nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
tm.assert_series_equal(count_B, expected['B'])
def test_count_object():
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type():
# GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count():
df = DataFrame({'a': np.array(
[0, 1, 2, 100], np.int8),
'b': np.array(
[1, 2, 3, 6], np.uint32),
'c': np.array(
[4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
# size
# --------------------------------
def test_size(df):
grouped = df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('A')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('B')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
tm.assert_series_equal(left, right, check_names=False)
# GH11699
df = DataFrame([], columns=['A', 'B'])
out = Series([], dtype='int64', index=Index([], name='A'))
tm.assert_series_equal(df.groupby('A').size(), out)
# pipe
# --------------------------------
def test_pipe():
# Test the pipe method of DataFrameGroupBy.
# Issue #17871
random_state = np.random.RandomState(1234567890)
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': random_state.randn(8),
'C': random_state.randn(8)})
def f(dfgb):
return dfgb.B.max() - dfgb.C.min().min()
def square(srs):
return srs ** 2
# Note that the transformations are
# GroupBy -> Series
# Series -> Series
# This then chains the GroupBy.pipe and the
# NDFrame.pipe methods
result = df.groupby('A').pipe(f).pipe(square)
index = Index([u'bar', u'foo'], dtype='object', name=u'A')
expected = pd.Series([8.99110003361, 8.17516964785], name='B',
index=index)
tm.assert_series_equal(expected, result)
def test_pipe_args():
# Test passing args to the pipe method of DataFrameGroupBy.
# Issue #17871
df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'],
'x': [1.0, 2.0, 3.0, 2.0, 5.0],
'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]})
def f(dfgb, arg1):
return (dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
.groupby(dfgb.grouper))
def g(dfgb, arg2):
return dfgb.sum() / dfgb.sum().sum() + arg2
def h(df, arg3):
return df.x + df.y - arg3
result = (df
.groupby('group')
.pipe(f, 0)
.pipe(g, 10)
.pipe(h, 100))
# Assert the results here
index = pd.Index(['A', 'B', 'C'], name='group')
expected = pd.Series([-79.5160891089, -78.4839108911, -80],
index=index)
tm.assert_series_equal(expected, result)
# test SeriesGroupby.pipe
ser = pd.Series([1, 1, 2, 2, 3, 3])
result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
expected = pd.Series([4, 8, 12], index=pd.Int64Index([1, 2, 3]))
tm.assert_series_equal(result, expected)
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = pd.DataFrame({
"user": ["A", "A", "A", "A", "A"],
"connections": [4970, 4749, 4719, 4704, 18446744073699999744]
})
assert df.groupby('user')['connections'].mean()['A'] == 3689348814740003840
|
|
"""
dialogs for ErMagicBuilder
"""
# pylint: disable=W0612,C0111,C0103,W0201,C0301
import wx
import wx.grid
import numpy as np
from pmagpy import contribution_builder as cb
from . import pmag_widgets as pw
from . import grid_frame3
class ErMagicCheckFrame3(wx.Frame):
def __init__(self, parent, title, WD, contribution):
wx.Frame.__init__(self, parent, -1, title)
self.WD = WD
self.main_frame = self.Parent
self.contribution = contribution
self.temp_data = {}
self.grid = None
self.deleteRowButton = None
self.selected_rows = set()
self.min_size = (1160, 350)
self.contribution.propagate_ages()
# re-do the 'quit' binding so that it only closes the current window
self.main_frame.Bind(wx.EVT_MENU, lambda event: self.main_frame.menubar.on_quit(event, self), self.main_frame.menubar.file_quit)
self.InitSpecCheck()
def InitSpecCheck(self):
"""
make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to
"""
#wait = wx.BusyInfo("Please wait, working...")
#wx.SafeYield()
self.contribution.propagate_lithology_cols()
spec_df = self.contribution.tables['specimens'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'specimens', 'specimens', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitSampCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.backButton.Disable()
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitSampCheck(self):
"""
make an interactive grid in which users can edit sample names
as well as which site a sample belongs to
"""
# propagate any type/lithology/class data from sites to samples table
# will only overwrite if sample values are blank
self.contribution.propagate_lithology_cols()
samp_df = self.contribution.tables['samples'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'samples', 'samples', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
next_dia = self.InitSiteCheck
prev_dia = self.InitSpecCheck
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, next_dia),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, prev_dia),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitSiteCheck(self):
"""
make an interactive grid in which users can edit site names
as well as which location a site belongs to
"""
# propagate average lat/lon info from samples table if
# available in samples and missing in sites
self.contribution.propagate_average_up(cols=['lat', 'lon', 'height'],
target_df_name='sites',
source_df_name='samples')
# propagate lithology columns
self.contribution.propagate_lithology_cols()
site_df = self.contribution.tables['sites'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'sites', 'sites', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitLocCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSampCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitLocCheck(self):
"""
make an interactive grid in which users can edit locations
"""
# if there is a location without a name, name it 'unknown'
self.contribution.rename_item('locations', 'nan', 'unknown')
# propagate lat/lon values from sites table
self.contribution.get_min_max_lat_lon()
# propagate lithologies & geologic classes from sites table
self.contribution.propagate_cols_up(['lithologies',
'geologic_classes'], 'locations', 'sites')
res = self.contribution.propagate_min_max_up()
if cb.not_null(res):
self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
# set up frame
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'locations', 'locations', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitAgeCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSiteCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, min_size=self.min_size)
# center
self.grid_frame.Centre()
return
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
age_df = self.contribution.tables['ages'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'ages', 'ages', self.panel,
main_frame=self.main_frame)
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON, lambda event: self.onContinue(event, grid, None),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitLocCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def on_close_grid_frame(self, event=None):
# required placeholder
pass
def onContinue(self, event, grid, next_dia=None):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.grid_frame.drop_down_menu:
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# save all changes to data object and write to file
self.grid_frame.grid_builder.save_grid_data()
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
warn_string = ""
for error_name, error_cols in list(validation_errors.items()):
if error_cols:
warn_string += "You have {}: {}.\n\n".format(error_name, ", ".join(error_cols))
warn_string += "Are you sure you want to continue?"
result = pw.warning_with_override(warn_string)
if result == wx.ID_YES:
pass
else:
return False
else:
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
self.panel.Destroy()
if next_dia:
next_dia()
else:
# propagate any type/lithology/class data from sites to samples table
# will only overwrite if sample values are blank or "Not Specified"
self.contribution.propagate_lithology_cols()
wx.MessageBox('Done!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
# show main frame
self.main_frame.Show()
def onbackButton(self, event=None, prev_dia=None):
if prev_dia:
alert = True if self.grid_frame.grid.changes else False
self.grid_frame.onSave(event=None, alert=alert, destroy=True)
#if self.grid_frame.grid.name == 'samples':
# self.sample_window -= 2
self.panel.Destroy()
prev_dia()
def validate(self, grid):
"""
Using the MagIC data model, generate validation errors on a MagicGrid.
Parameters
----------
grid : dialogs.magic_grid3.MagicGrid
The MagicGrid to be validated
Returns
---------
warnings: dict
Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]}
"""
grid_name = str(grid.GetName())
dmodel = self.contribution.dmodel
reqd_headers = dmodel.get_reqd_headers(grid_name)
df = self.contribution.tables[grid_name].df
df = df.replace('', np.nan) # python does not view empty strings as null
if df.empty:
return {}
col_names = set(df.columns)
missing_headers = set(reqd_headers) - col_names
present_headers = set(reqd_headers) - set(missing_headers)
non_null_headers = df.dropna(how='all', axis='columns').columns
null_reqd_headers = present_headers - set(non_null_headers)
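# Required columns therefore fall into three buckets: absent entirely, present
# but all null, or present with data; only the first two produce warnings.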
if any(missing_headers) or any(null_reqd_headers):
warnings = {'missing required column(s)': sorted(missing_headers),
'no data in required column(s)': sorted(null_reqd_headers)}
else:
warnings = {}
return warnings
def on_saveButton(self, event, grid):
"""saves any editing of the grid but does not continue to the next window"""
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if self.grid_frame.drop_down_menu: # unhighlight selected columns, etc.
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col labels
starred_cols, hatted_cols = grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid.HideCellEditControl() # removes focus from cell that was being edited
if grid.changes:
self.onSave(grid)
for col in starred_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '**')
for col in hatted_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '^^')
del wait
def on_backButton(self, event, previous_dia, current_dia=None):
# save first?
if self.grid.changes:
result = pw.warning_with_override("You have unsaved data which will be lost. Are you sure you want to go back?")
if result == wx.ID_NO:
return
# go back to previous grid
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if current_dia == self.InitLocCheck:
pass
#elif previous_dia == self.InitSpecCheck or previous_dia == self.InitSampCheck:
# self.sample_window = 0
self.panel.Destroy()
previous_dia()
del wait
### Manage data methods ###
def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.grid_frame.drop_down_menu:
self.grid_frame.drop_down_menu.clean_up()
# save all changes to data object and write to file
self.grid_frame.grid_builder.save_grid_data()
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
|
|
from os import path
import json
import unittest
from unittest import TestCase
from mock import MagicMock, patch, call
from slap.publisher import Publisher
class TestMapServicePublisher(TestCase):
def setUp(self):
config = {
'agsUrl': 'my/server',
'mapServices': {
'services': [
{
'input': 'foo'
}
]
}
}
self.publisher = Publisher('user', 'pwd', config)
def test_publish_all(self):
expected_calls = [call(x) for x in self.publisher.config_parser.service_types]
with patch('slap.publisher.Publisher.publish_services') as mock_publish_services:
self.publisher.publish_all()
mock_publish_services.assert_has_calls(expected_calls)
def test_get_publishing_params_from_config(self):
config = {
'input': 'some/input',
'output': 'some/output',
'serviceName': 'serviceName',
'folderName': 'folderName',
'json': {'foo': 'bar'},
'initialState': 'STOPPED'
}
expected = ('some/input', 'some/output', 'serviceName', 'folderName', {'foo': 'bar'}, 'STOPPED')
self.assertEqual(expected, self.publisher._get_publishing_params_from_config(config))
def test_get_default_publishing_params_from_config(self):
config = {
'input': 'some/input'
}
expected = ('some/input', 'output', 'input', None, {}, 'STARTED')
self.assertEqual(expected, self.publisher._get_publishing_params_from_config(config))
def test_publish_service_no_errors(self):
with patch('slap.publisher.Publisher._get_method_by_service_type') as mock_publish_method:
mock_publish_method.return_value = MagicMock(return_value={'errors': {}})
with patch('slap.publisher.Publisher.publish_sd_draft') as mock_publish_sd_draft:
self.publisher.publish_service('some_type', {'input': 'some/input'})
mock_publish_sd_draft.assert_called_once()
def test_publish_service_with_errors(self):
with patch('slap.publisher.Publisher._get_method_by_service_type') as mock_publish_method:
mock_publish_method.return_value = MagicMock(return_value={'errors': {'something': 'bad'}})
with patch('slap.publisher.Publisher.publish_sd_draft') as mock_publish_sd_draft:
with self.assertRaises(RuntimeError):
self.publisher.publish_service('some_type', {'input': 'some/input'})
mock_publish_sd_draft.assert_not_called()
def test_get_service_definition_paths(self):
expected = ('file', path.abspath('output/file.sddraft'), path.abspath('output/file.sd'))
actual = self.publisher._get_service_definition_paths('/my/file.mxd', 'output')
self.assertEqual(expected, actual)
def test_create_output_directory(self):
pass
def test_set_hostname(self):
config = {
'agsUrl': 'http://server/arcgis',
'mapServices': {
'services': [
{
'input': 'foo'
}
]
}
}
publisher = Publisher('user', 'pwd', config, 'server2')
self.assertEqual(publisher.config['agsUrl'], 'http://server2/arcgis')
def test_service_name_set_by_param(self):
config = {
'input': 'my/filename.mxd',
'serviceName': 'foo',
'json': {
'serviceName': 'bar'
}
}
self.assertEqual(self.publisher._get_service_name_from_config(config), 'foo')
def test_service_name_set_by_json(self):
config = {
'input': 'my/filename.mxd',
'json': {
'serviceName': 'baz'
}
}
self.assertEqual(self.publisher._get_service_name_from_config(config), 'baz')
def test_service_name_set_by_filename(self):
config = {
'input': 'my/filename.mxd',
}
self.assertEqual(self.publisher._get_service_name_from_config(config), 'filename')
def test_raise_exception_when_input_not_found(self):
with self.assertRaises(ValueError):
self.publisher.publish_input('bar')
def test_check_service_type_with_backslashes_in_input(self):
self.publisher.publish_service = MagicMock(return_value=True)
self.publisher.config = json.loads(
'{"imageServices": {"services": [{"input": "\\\\foo\\bar\\baz",'
'"connectionFilePath": "my/service/connection"}]}}')
self.assertTrue(self.publisher._check_service_type('imageServices', '\\foo\bar\baz'))
def test_analysis_successful_true(self):
self.assertTrue(self.publisher.analysis_successful({}))
def test_analysis_successful_raises_exception(self):
with self.assertRaises(RuntimeError):
self.publisher.analysis_successful({'foo': 'bar'})
def test_get_method_by_type(self):
self.assertEqual(self.publisher.arcpy_helper.publish_mxd, self.publisher._get_method_by_service_type('mapServices'))
self.assertEqual(self.publisher.arcpy_helper.publish_gp, self.publisher._get_method_by_service_type('gpServices'))
self.assertEqual(self.publisher.arcpy_helper.publish_image_service,
self.publisher._get_method_by_service_type('imageServices'))
with self.assertRaises(ValueError):
self.publisher._get_method_by_service_type('foo')
def test_register_data_sources(self):
data_sources = [{'foo': 'bar'}]
self.publisher.config = {'dataSources': data_sources}
with patch('slap.esri.ArcpyHelper.register_data_sources') as mock_register:
self.publisher.register_data_sources()
mock_register.assert_called_once_with(data_sources)
def test_delete_service(self):
service_name = 'myService'
folder_name = 'folder'
with patch('slap.api.Api.service_exists') as mock_exists:
mock_exists.return_value = {'exists': True}
with patch('slap.api.Api.delete_service') as mock_delete:
self.publisher.delete_service(service_name, folder_name)
mock_delete.assert_called_once_with(service_name=service_name, folder=folder_name)
def test_delete_service_only_if_exists(self):
service_name = 'myService'
folder_name = 'folder'
with patch('slap.api.Api.service_exists') as mock_exists:
mock_exists.return_value = {'exists': False}
with patch('slap.api.Api.delete_service') as mock_delete:
self.publisher.delete_service(service_name, folder_name)
mock_delete.assert_not_called()
def test_update_service(self):
service_name = 'myService'
folder_name = 'folder'
params = {'foo': 'bar', 'baz': 'quux'}
with patch('slap.api.Api.get_service_params') as mock_get_params:
mock_get_params.return_value = {'baz': 'quux'}
with patch('slap.api.Api.edit_service') as mock_edit:
self.publisher.update_service(service_name, folder_name, {'foo': 'bar'})
mock_edit.assert_called_once_with(service_name=service_name, folder=folder_name, params=params)
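# The class-level patch decorators below are applied bottom-up, so each test
# method receives the mocks in reverse order: update, upload, delete, stage.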
@patch('slap.esri.ArcpyHelper.stage_service_definition')
@patch('slap.publisher.Publisher.delete_service')
@patch('slap.esri.ArcpyHelper.upload_service_definition')
@patch('slap.publisher.Publisher.update_service')
class TestMapServicePublisherSdDraft(TestCase):
def setUp(self):
config = {
'agsUrl': 'my/server',
'mapServices': {
'services': [
{
'input': 'foo'
}
]
}
}
self.publisher = Publisher('user', 'pwd', config)
def test_publish_sd_draft(self, mock_update, mock_upload_sd, mock_delete, mock_stage_sd):
sddraft = 'path/to/sddraft'
sd = 'path/to/sd'
service_name = 'myService'
self.publisher.publish_sd_draft(sddraft, sd, service_name)
mock_stage_sd.assert_called_once_with(sddraft=sddraft, sd=sd)
mock_delete.assert_called_once_with(service_name=service_name, folder_name=None)
mock_upload_sd.assert_called_once_with(sd=sd, initial_state='STARTED')
mock_update.assert_not_called()
def test_publish_sd_draft_with_json(self, mock_update, mock_upload_sd, mock_delete, mock_stage_sd):
sddraft = 'path/to/sddraft'
sd = 'path/to/sd'
service_name = 'myService'
folder = 'folder'
initial_state = 'STOPPED'
json = {'foo': 'bar'}
self.publisher.publish_sd_draft(sddraft, sd, service_name, folder, initial_state, json)
mock_stage_sd.assert_called_once_with(sddraft=sddraft, sd=sd)
mock_delete.assert_called_once_with(service_name=service_name, folder_name=folder)
mock_upload_sd.assert_called_once_with(sd=sd, initial_state=initial_state)
mock_update.assert_called_once_with(service_name=service_name, folder_name=folder, json=json)
@patch('slap.config.ConfigParser.load_config')
class TestLoadingConfig(TestCase):
def test_load_config_by_path(self, mock_load_config):
config_path = 'path/to/config'
publisher = Publisher('user', 'pwd', config_path)
publisher.config_parser.load_config.assert_called_once_with(config_path)
def test_load_config_as_dict(self, mock_load_config):
publisher = Publisher('user', 'pwd', {'input': 'foo', 'agsUrl': 'bar'})
assert not publisher.config_parser.load_config.called
if __name__ == '__main__':
unittest.main()
|
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions used by multiple converter files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import datetime
import sys
from absl import logging
import six
from six.moves import range
import flatbuffers
from tensorflow.core.protobuf import config_pb2 as _config_pb2
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2
from tensorflow.lite.python import schema_py_generated as schema_fb
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.lite.python.op_hint import find_all_hinted_output_nodes
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.eager import function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import error_interpolation as _error_interpolation
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training.saver import export_meta_graph as _export_meta_graph
# Map of tf.dtypes to TFLite types_flag_pb2.
_MAP_TF_TO_TFLITE_TYPES = {
dtypes.float32: _types_pb2.FLOAT,
dtypes.float16: _types_pb2.FLOAT16,
dtypes.int32: _types_pb2.INT32,
dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
dtypes.int64: _types_pb2.INT64,
dtypes.string: _types_pb2.STRING,
dtypes.bool: _types_pb2.BOOL,
dtypes.int16: _types_pb2.QUANTIZED_INT16,
dtypes.complex64: _types_pb2.COMPLEX64,
dtypes.int8: _types_pb2.INT8,
dtypes.float64: _types_pb2.FLOAT64,
dtypes.complex128: _types_pb2.COMPLEX128,
}
_MAP_TFLITE_ENUM_TO_TF_TYPES = {
0: dtypes.float32,
1: dtypes.float16,
2: dtypes.int32,
3: dtypes.uint8,
4: dtypes.int64,
5: dtypes.string,
6: dtypes.bool,
7: dtypes.int16,
8: dtypes.complex64,
9: dtypes.int8,
10: dtypes.float64,
11: dtypes.complex128,
}
_TFLITE_FILE_IDENTIFIER = b"TFL3"
_TFLITE_MODEL_INPUT_OUTPUT_TYPES = (dtypes.float32, dtypes.int8, dtypes.uint8)
def convert_dtype_to_tflite_type(tf_dtype):
"""Converts tf.dtype to TFLite proto type.
Args:
tf_dtype: tf.dtype
Raises:
ValueError: Unsupported tf.dtype.
Returns:
types_flag_pb2.
"""
result = _MAP_TF_TO_TFLITE_TYPES.get(tf_dtype)
if result is None:
raise ValueError("Unsupported tf.dtype {0}".format(tf_dtype))
return result
def _convert_tflite_enum_type_to_tf_type(tflite_enum_type):
"""Converts tflite enum type (eg: 0) to tf type (eg: tf.float32).
Args:
tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32)
Raises:
ValueError: If an invalid tflite enum type is provided.
Returns:
tf type (eg: tf.float32)
"""
tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)
if tf_type is None:
raise ValueError(
"Unsupported enum {}. The valid map of enum to tf types is : {}"
.format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))
return tf_type
def _get_tf_type_name(tf_type):
"""Converts tf.dtype (eg: tf.float32) to str (eg: "tf.float32")."""
return "tf." + tf_type.name
def get_tensor_name(tensor):
"""Returns name of the input tensor.
Args:
tensor: tf.Tensor
Returns:
str
"""
parts = six.ensure_str(tensor.name).split(":")
if len(parts) > 2:
raise ValueError("Tensor name invalid. Expect 0 or 1 colon, got {0}".format(
len(parts) - 1))
# To be consistent with the tensor naming scheme in tensorflow, we need to
# drop the ':0' suffix for the first tensor.
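# e.g. "dense/BiasAdd:0" -> "dense/BiasAdd", while "split:1" is returned unchanged.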
if len(parts) > 1 and parts[1] != "0":
return tensor.name
return parts[0]
def get_tensors_from_tensor_names(graph, tensor_names):
"""Gets the Tensors associated with the `tensor_names` in the provided graph.
Args:
graph: TensorFlow Graph.
tensor_names: List of strings that represent names of tensors in the graph.
Returns:
A list of Tensor objects in the same order the names are provided.
Raises:
ValueError:
tensor_names contains an invalid tensor name.
"""
# Get the list of all of the tensors.
tensor_name_to_tensor = {}
for op in graph.get_operations():
for tensor in op.values():
tensor_name_to_tensor[get_tensor_name(tensor)] = tensor
# Get the tensors associated with tensor_names.
tensors = []
invalid_tensors = []
for name in tensor_names:
if not isinstance(name, six.string_types):
raise ValueError("Invalid type for a tensor name in the provided graph. "
"Expected type for a tensor name is 'str', instead got "
"type '{}' for tensor name '{}'".format(
type(name), name))
tensor = tensor_name_to_tensor.get(name)
if tensor is None:
invalid_tensors.append(name)
else:
tensors.append(tensor)
# Throw ValueError if any user input names are not valid tensors.
if invalid_tensors:
raise ValueError("Invalid tensors '{}' were found.".format(
",".join(invalid_tensors)))
return tensors
def set_tensor_shapes(tensors, shapes):
"""Sets Tensor shape for each tensor if the shape is defined.
Args:
tensors: TensorFlow ops.Tensor.
shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
Raises:
ValueError:
`shapes` contains an invalid tensor.
`shapes` contains an invalid shape for a valid tensor.
"""
if shapes:
tensor_names_to_tensor = {
get_tensor_name(tensor): tensor for tensor in tensors
}
for name, shape in shapes.items():
if name not in tensor_names_to_tensor:
raise ValueError("Invalid tensor \'{}\' found in tensor shapes "
"map.".format(name))
if shape is not None:
tensor = tensor_names_to_tensor[name]
try:
tensor.set_shape(shape)
except ValueError as error:
message = ("The shape of tensor '{0}' cannot be changed from {1} to "
"{2}. {3}".format(name, tensor.shape, shape, str(error)))
raise ValueError(message)
def get_grappler_config(optimizers_list):
"""Creates a tf.compat.v1.ConfigProto for configuring Grappler.
Args:
optimizers_list: List of strings that represents the list of optimizers.
Returns:
tf.ConfigProto.
"""
config = _config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
for optimizer in optimizers_list:
rewrite_options.optimizers.append(optimizer)
return config
def run_graph_optimizations(graph_def,
input_arrays,
output_arrays,
config,
graph=None):
"""Apply standard TensorFlow optimizations to the graph_def.
Args:
graph_def: Frozen GraphDef to be optimized.
input_arrays: List of arrays that are considered inputs of the graph.
output_arrays: List of arrays that are considered outputs of the graph.
config: tf.ConfigProto.
graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)
Returns:
A new, optimized GraphDef.
"""
meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)
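# Record the input/output arrays in a SignatureDef attached to the meta graph;
# this appears to be how Grappler learns which tensors are feeds/fetches (the
# signature key itself is arbitrary).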
signature = _meta_graph_pb2.SignatureDef()
for array in input_arrays:
signature.inputs[array.name].name = array.name
signature.inputs[array.name].dtype = array.dtype.as_datatype_enum
signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())
for array in output_arrays:
signature.outputs[array.name].name = array.name
signature.outputs[array.name].dtype = array.dtype.as_datatype_enum
signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())
meta_graph.signature_def["not_used_key"].CopyFrom(signature)
# We need to add a collection called 'train_op' so that grappler
# knows what the outputs are.
fetch_collection = _meta_graph_pb2.CollectionDef()
for array in input_arrays + output_arrays:
fetch_collection.node_list.value.append(array.name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
return tf_optimizer.OptimizeGraph(config, meta_graph)
def _convert_op_hints_if_present(sess, graph_def, output_tensors,
hinted_outputs_nodes):
if is_frozen_graph(sess):
raise ValueError("Try to convert op hints, needs unfrozen graph.")
output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
graph_def = tf_graph_util.convert_variables_to_constants(
sess, graph_def, output_arrays + hinted_outputs_nodes)
graph_def = convert_op_hints_to_stubs(graph_def=graph_def)
return graph_def
def freeze_graph(sess, input_tensors, output_tensors):
"""Returns a frozen GraphDef.
Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the
existing GraphDef is returned. The Grappler pass is only run on models that
are frozen in order to inline the functions in the graph.
If OpHints is present, it will try to convert the OpHint graph.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors.
output_tensors: List of output tensors (only .name is used from this).
Returns:
Frozen GraphDef.
"""
# Runs a Grappler pass in order to inline any functions in the graph.
# Asides from inlining any simple function, Grappler will also try to lower
# while loop into switch merge representation which is undesired for Ophints,
# so we simply remove those attributes to prevent Grappler from doing so.
graph_def = _convert_to_constants.disable_lower_using_switch_merge(
sess.graph_def)
config = get_grappler_config(["function"])
graph_def = run_graph_optimizations(
graph_def, input_tensors, output_tensors, config, graph=sess.graph)
# If ophints are present, just convert them.
hinted_outputs_nodes = find_all_hinted_output_nodes(sess)
if hinted_outputs_nodes:
return _convert_op_hints_if_present(sess, graph_def, output_tensors,
hinted_outputs_nodes)
if not is_frozen_graph(sess):
output_node_names = [tensor.name.split(":")[0] for tensor in output_tensors]
return tf_graph_util.convert_variables_to_constants(sess, graph_def,
output_node_names)
else:
return sess.graph_def
def is_frozen_graph(sess):
"""Determines if the graph is frozen.
Determines if a graph has previously been frozen by checking for any
operations of type Variable*. If variables are found, the graph is not frozen.
Args:
sess: TensorFlow Session.
Returns:
Bool.
"""
for op in sess.graph.get_operations():
if six.ensure_str(op.type).startswith("Variable") or six.ensure_str(
op.type).endswith("VariableOp"):
return False
return True
def build_debug_info_func(original_graph):
"""Returns a method to retrieve the `GraphDebugInfo` from the original graph.
Args:
original_graph: The original `Graph` containing all the op stack traces.
Returns:
A function which retrieves the stack traces from the original graph and
converts them to a `GraphDebugInfo` for a given set of nodes.
"""
def f(original_nodes):
"""Function to create `GraphDebugInfo` for the given `original_nodes`."""
if not original_graph:
return None
# For the given nodes, gets all the op definitions in the original graph.
useful_ops = []
for func, name in original_nodes:
try:
if not func:
useful_ops.append((func, original_graph.get_operation_by_name(name)))
else:
sub_func = original_graph._get_function(func) # pylint: disable=protected-access
if isinstance(sub_func, function._EagerDefinedFunction): # pylint: disable=protected-access
useful_ops.append(
(func, sub_func.graph.get_operation_by_name(name)))
else:
sys.stderr.write(
"Use '@tf.function' or '@defun' to decorate the function.")
continue
except KeyError:
# New node created by graph optimizer. No stack trace from source code.
continue
# Convert all the op definitions to stack traces in terms of GraphDebugInfo.
return _error_interpolation.create_graph_debug_info_def(useful_ops)
return f
def convert_debug_info_func(saved_debug_info):
"""Returns a method to retrieve the `GraphDebugInfo` from the original graph.
Args:
saved_debug_info: The `GraphDebugInfo` containing all the debug info.
Returns:
A function which retrieves the stack traces from the original graph and
converts them to a `GraphDebugInfo` for a given set of nodes.
"""
def f(original_nodes):
"""Function to create `GraphDebugInfo` for the given `original_nodes`."""
if not saved_debug_info:
return None
output_debug_info = graph_debug_info_pb2.GraphDebugInfo()
# All the files are copied over, so the index wouldn't be changed.
output_debug_info.files[:] = saved_debug_info.files
# We only copy over the debug info for the input nodes
for func, node in original_nodes:
debug_key = node + "@" + func
output_debug_info.traces[debug_key].CopyFrom(
saved_debug_info.traces[debug_key])
return output_debug_info
return f
def get_debug_info(nodes_to_debug_info_func, converted_graph):
"""Returns the debug info for the original nodes in the `converted_graph`.
Args:
nodes_to_debug_info_func: The method to collect the op debug info for the
nodes.
converted_graph: A `GraphDef` after optimization and transformation.
Returns:
`GraphDebugInfo` for all the original nodes in `converted_graph`.
"""
if not nodes_to_debug_info_func:
return None
# Collect all the debug info nodes from the converted_graph
original_nodes = set()
for node in converted_graph.node:
debug_nodes = node.experimental_debug_info.original_node_names
debug_funcs = node.experimental_debug_info.original_func_names
# If the `original_node_names` are empty, uses the node name directly.
if not debug_nodes:
original_nodes.add(("", node.name))
else:
for i in range(len(debug_nodes)):
debug_func = "" if i >= len(debug_funcs) else debug_funcs[i]
original_nodes.add((debug_func, debug_nodes[i]))
# Convert the nodes to the debug info proto object.
return nodes_to_debug_info_func(original_nodes)
def convert_bytes_to_c_source(data,
array_name,
max_line_width=80,
include_guard=None,
include_path=None,
use_tensorflow_license=False):
"""Returns strings representing a C constant array containing `data`.
Args:
data: Byte array that will be converted into a C constant.
array_name: String to use as the variable name for the constant array.
max_line_width: The longest line length, for formatting purposes.
include_guard: Name to use for the include guard macro definition.
include_path: Optional path to include in the source file.
use_tensorflow_license: Whether to include the standard TensorFlow Apache2
license in the generated files.
Returns:
Text that can be compiled as a C source file to link in the data as a
literal array of values.
Text that can be used as a C header file to reference the literal array.
"""
starting_pad = " "
array_lines = []
array_line = starting_pad
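# Each byte is emitted as " 0xNN,"; a new output line is started once the
# current line approaches max_line_width.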
for value in bytearray(data):
if (len(array_line) + 4) > max_line_width:
array_lines.append(array_line + "\n")
array_line = starting_pad
array_line += " 0x%02x," % (value)
if len(array_line) > len(starting_pad):
array_lines.append(array_line + "\n")
array_values = "".join(array_lines)
if include_guard is None:
include_guard = "TENSORFLOW_LITE_UTIL_" + array_name.upper() + "_DATA_H_"
if include_path is not None:
include_line = "#include \"{include_path}\"\n".format(
include_path=include_path)
else:
include_line = ""
if use_tensorflow_license:
license_text = """
/* Copyright {year} The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
""".format(year=datetime.date.today().year)
else:
license_text = ""
source_template = """{license_text}
// This is a TensorFlow Lite model file that has been converted into a C data
// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.
// This form is useful for compiling into a binary for devices that don't have a
// file system.
{include_line}
// We need to keep the data array aligned on some architectures.
#ifdef __has_attribute
#define HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define HAVE_ATTRIBUTE(x) 0
#endif
#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))
#else
#define DATA_ALIGN_ATTRIBUTE
#endif
const unsigned char {array_name}[] DATA_ALIGN_ATTRIBUTE = {{
{array_values}}};
const int {array_name}_len = {array_length};
"""
source_text = source_template.format(
array_name=array_name,
array_length=len(data),
array_values=array_values,
license_text=license_text,
include_line=include_line)
header_template = """
{license_text}
// This is a TensorFlow Lite model file that has been converted into a C data
// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.
// This form is useful for compiling into a binary for devices that don't have a
// file system.
#ifndef {include_guard}
#define {include_guard}
extern const unsigned char {array_name}[];
extern const int {array_name}_len;
#endif // {include_guard}
"""
header_text = header_template.format(
array_name=array_name,
include_guard=include_guard,
license_text=license_text)
return source_text, header_text
def _convert_model_from_bytearray_to_object(model_bytearray):
"""Converts a tflite model from a bytearray into a parsable object."""
model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)
model_object = schema_fb.ModelT.InitFromObj(model_object)
model_object = copy.deepcopy(model_object)
model_object.subgraphs[0].inputs[0] = model_object.subgraphs[0].inputs[0]
return model_object
def _convert_model_from_object_to_bytearray(model_object):
"""Converts a tflite model from a parsable object into a bytearray."""
# Initial size of the buffer, which will grow automatically if needed
builder = flatbuffers.Builder(1024)
model_offset = model_object.Pack(builder)
builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER)
return bytes(builder.Output())
def _remove_tensors_from_model(model, remove_tensors_idxs):
"""Remove tensors from model."""
if not remove_tensors_idxs:
return
if len(model.subgraphs) > 1:
raise ValueError("Model must only have one subgraph. Instead, it has "
"{} subgraphs.".format(len(model.subgraphs)))
subgraph = model.subgraphs[0]
tensors = subgraph.tensors
operators = subgraph.operators
logging.debug("Removing tensors at indices : %s", remove_tensors_idxs)
# An optimized check to validate if "remove_tensors_idxs" (eg: [4,5,6]) is an
# exact subset, with ordering, of "tensors" indices (eg: [0,1,2,3,4,5,6]).
if min(remove_tensors_idxs) == len(tensors) - len(remove_tensors_idxs):
logging.debug("Removing tensors only at the end of the tensor list")
del tensors[min(remove_tensors_idxs):]
else:
logging.debug("Removing tensors requires updating the model")
# Map the old tensor indices to new tensor indices
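# Example: with 7 tensors and remove_tensors_idxs = {2, 5}, the map becomes
# {0: 0, 1: 1, 3: 2, 4: 3, 6: 4}; removed indices later resolve to -1.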
d_old_to_new_tensors = {}
left_shift_by = 0
for idx in range(len(tensors)):
if idx in remove_tensors_idxs:
left_shift_by += 1
else:
d_old_to_new_tensors[idx] = idx - left_shift_by
logging.debug("Old to new tensors map: %s", d_old_to_new_tensors.__str__())
# Update tensor indices referenced throughout the model
def update_tensors(tensor_idxs):
for i, ti in enumerate(tensor_idxs):
tensor_idxs[i] = d_old_to_new_tensors.get(ti, -1)
update_tensors(subgraph.inputs)
update_tensors(subgraph.outputs)
for op in operators:
update_tensors(op.inputs)
update_tensors(op.outputs)
# Delete the tensors
for idx in sorted(remove_tensors_idxs, reverse=True):
tensors.pop(idx)
logging.debug("Removed tensors marked for deletion")
def _modify_model_input_type(model, inference_input_type=dtypes.float32):
"""Modify model input type."""
if inference_input_type == dtypes.float32:
return
if inference_input_type not in _TFLITE_MODEL_INPUT_OUTPUT_TYPES:
raise ValueError(
"Unsupported `inference_output_type` value. Expected to be in {}, "
"instead got {}.".format(tuple(_get_tf_type_name(t) for t in
_TFLITE_MODEL_INPUT_OUTPUT_TYPES),
_get_tf_type_name(inference_input_type)))
subgraph = model.subgraphs[0]
tensors = subgraph.tensors
operators = subgraph.operators
# Find all quantize operators
quant_opcode_idxs = []
for idx, opcode in enumerate(model.operatorCodes):
if opcode.builtinCode == schema_fb.BuiltinOperator.QUANTIZE:
quant_opcode_idxs.append(idx)
if not quant_opcode_idxs:
raise ValueError("Model input is not quantized.")
# Ensure that the model input is quantized
input_quant_ops = []
for op in operators:
# Check if the operator quantizes an input
if op.opcodeIndex in quant_opcode_idxs and op.inputs[0] in subgraph.inputs:
# If found, validate the operator input/output tensor types
float_tensor, int_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
if float_tensor.type != schema_fb.TensorType.FLOAT32:
raise ValueError(
"Model input type must be tf.float32. Expected type for tensor "
"with name '{}' is tf.float32, instead type is {}".format(
float_tensor.name, _get_tf_type_name(
_convert_tflite_enum_type_to_tf_type(float_tensor.type))))
if int_tensor.type != schema_fb.TensorType.INT8:
raise ValueError(
"Model input is not quantized. Expected type for tensor "
"with name '{}' is tf.int8, instead type is {}".format(
int_tensor.name, _get_tf_type_name(
_convert_tflite_enum_type_to_tf_type(int_tensor.type))))
input_quant_ops.append(op)
if len(subgraph.inputs) != len(input_quant_ops):
raise ValueError("Model input is not quantized.")
# Modify model input type
if inference_input_type == dtypes.uint8:
# Change quant op (float to int8) to quant op (uint8 to int8)
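# The affine mapping real = scale * (q - zero_point) is preserved when the
# int8 range [-128, 127] is shifted to the uint8 range [0, 255]: the scale is
# unchanged and the zero point moves up by 128.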
for op in input_quant_ops:
int8_quantization = tensors[op.outputs[0]].quantization
uint8_quantization = schema_fb.QuantizationParametersT()
uint8_quantization.scale = [int8_quantization.scale[0]]
uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]
tensors[op.inputs[0]].quantization = uint8_quantization
tensors[op.inputs[0]].type = schema_fb.TensorType.UINT8
elif inference_input_type == dtypes.int8:
# Remove the inputs and the quant operator
remove_tensors_idxs = set()
for op in input_quant_ops:
subgraph.inputs[subgraph.inputs == op.inputs[0]] = op.outputs[0]
remove_tensors_idxs.add(op.inputs[0])
operators.remove(op)
# Remove tensors marked for deletion.
_remove_tensors_from_model(model, remove_tensors_idxs)
else:
raise ValueError(
"Unsupported `inference_input_type` value. Expected to be in {}, "
"instead got {}.".format(tuple(_get_tf_type_name(t) for t in
_TFLITE_MODEL_INPUT_OUTPUT_TYPES),
_get_tf_type_name(inference_input_type)))
def _modify_model_output_type(model, inference_output_type=dtypes.float32):
"""Modify model output type."""
if inference_output_type == dtypes.float32:
return
if inference_output_type not in _TFLITE_MODEL_INPUT_OUTPUT_TYPES:
raise ValueError(
"Unsupported `inference_output_type` value. Expected to be in {}, "
"instead got {}.".format(tuple(_get_tf_type_name(t) for t in
_TFLITE_MODEL_INPUT_OUTPUT_TYPES),
_get_tf_type_name(inference_output_type)))
subgraph = model.subgraphs[0]
tensors = subgraph.tensors
operators = subgraph.operators
# Find all dequantize operators
dequant_opcode_idxs = []
for idx, opcode in enumerate(model.operatorCodes):
if opcode.builtinCode == schema_fb.BuiltinOperator.DEQUANTIZE:
dequant_opcode_idxs.append(idx)
if not dequant_opcode_idxs:
raise ValueError("Model output is not dequantized.")
# Ensure that the model output is dequantized
output_dequant_ops = []
for op in operators:
# Check if the operator dequantizes an output
if op.opcodeIndex in dequant_opcode_idxs and \
op.outputs[0] in subgraph.outputs:
# If found, validate the operator input/output tensor types
int_tensor, float_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
if float_tensor.type != schema_fb.TensorType.FLOAT32:
raise ValueError(
"Model output type must be tf.float32. Expected type for tensor "
"with name '{}' is tf.float32, instead type is {}".format(
float_tensor.name, _get_tf_type_name(
_convert_tflite_enum_type_to_tf_type(float_tensor.type))))
if int_tensor.type != schema_fb.TensorType.INT8:
raise ValueError(
"Model output is not dequantized. Expected type for tensor "
"with name '{}' is tf.int8, instead type is {}".format(
int_tensor.name, _get_tf_type_name(
_convert_tflite_enum_type_to_tf_type(int_tensor.type))))
output_dequant_ops.append(op)
if len(subgraph.outputs) != len(output_dequant_ops):
raise ValueError("Model output is not dequantized.")
# Modify model output type
if inference_output_type == dtypes.uint8:
# Find a quantize operator
quant_opcode_idx = -1
for idx, opcode in enumerate(model.operatorCodes):
if opcode.builtinCode == schema_fb.BuiltinOperator.QUANTIZE:
quant_opcode_idx = idx
break
# Create a quantize operator, if none exist
if quant_opcode_idx == -1:
quant_op = schema_fb.OperatorCodeT()
quant_op.builtinCode = schema_fb.BuiltinOperator.QUANTIZE
model.operatorCodes.append(quant_op)
quant_opcode_idx = len(model.operatorCodes) - 1
# Change dequant op (int8 to float) to quant op (int8 to uint8)
for op in output_dequant_ops:
op.opcodeIndex = quant_opcode_idx
int8_quantization = tensors[op.inputs[0]].quantization
uint8_quantization = schema_fb.QuantizationParametersT()
uint8_quantization.scale = [int8_quantization.scale[0]]
uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]
tensors[op.outputs[0]].quantization = uint8_quantization
tensors[op.outputs[0]].type = schema_fb.TensorType.UINT8
elif inference_output_type == dtypes.int8:
# Remove the outputs and the dequant operator
remove_tensors_idxs = set()
for op in output_dequant_ops:
subgraph.outputs[subgraph.outputs == op.outputs[0]] = op.inputs[0]
remove_tensors_idxs.add(op.outputs[0])
operators.remove(op)
# Remove tensors marked for deletion.
_remove_tensors_from_model(model, remove_tensors_idxs)
else:
raise ValueError(
"Unsupported `inference_output_type` value. Expected to be in {}, "
"instead got {}.".format(tuple(_get_tf_type_name(t) for t in
_TFLITE_MODEL_INPUT_OUTPUT_TYPES),
_get_tf_type_name(inference_output_type)))
def modify_model_io_type(
model, inference_input_type=dtypes.float32,
inference_output_type=dtypes.float32):
"""Modify the input/output type of a tflite model.
Args:
model: A tflite model.
inference_input_type: tf.DType representing modified input type.
(default tf.float32. If model input is int8 quantized, it must be in
{tf.float32, tf.int8, tf.uint8}, else it must be tf.float32)
inference_output_type: tf.DType representing modified output type.
(default tf.float32. If model output is int8 dequantized, it must be in
{tf.float32, tf.int8, tf.uint8}, else it must be tf.float32)
Returns:
A tflite model with modified input/output type.
Raises:
ValueError: If `inference_input_type`/`inference_output_type` is unsupported
or a supported integer type is specified for a model whose input/output is
not quantized/dequantized.
RuntimeError: If the modification was unsuccessful.
"""
if inference_input_type == dtypes.float32 and \
inference_output_type == dtypes.float32:
return model
model_object = _convert_model_from_bytearray_to_object(model)
if len(model_object.subgraphs) > 1:
raise ValueError("Model must only have one subgraph. Instead, it has "
"{} subgraphs.".format(len(model_object.subgraphs)))
_modify_model_input_type(model_object, inference_input_type)
_modify_model_output_type(model_object, inference_output_type)
return _convert_model_from_object_to_bytearray(model_object)
|
|
from valueDict import ValueDict
from handEvaluator import evalHand
from decimal import *
import cardsDict
from math import exp, tanh, log10, log, pow, sqrt, fabs, copysign
from util import hypGeoProb, multiHypGeoProb
from operator import mul
from functools import reduce  # reduce is not a builtin on Python 3
from numpy import zeros, count_nonzero, prod
from scipy import special
from collections import Counter
import time
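# The constants below look like precomputed strengths on the standard 7462-class
# hand-rank scale, inverted via (7463 - rank) so that larger is better: flushes
# use the midpoint of the rank range for a given high card, straights use the
# single rank of that straight. Keys 0..12 index card ranks from deuce to ace.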
_ACE_FLUSH = 7463 - (815+323)/2
_KING_FLUSH = 7463 - (1144+816)/2
_QUEEN_FLUSH = 7463 - (1353+1145)/2
_JACK_FLUSH = 7463 - (1477+1354)/2
_10_FLUSH = 7463 - (1546+1478)/2
_9_FLUSH = 7463 - (1581+1547)/2
_8_FLUSH = 7463 - (1595+1582)/2
_7_FLUSH = 7463 - (1599+1596)/2
_FLUSH_VALUES = {0 : _7_FLUSH,
1 : _7_FLUSH,
2 : _7_FLUSH,
3 : _7_FLUSH,
4 : _7_FLUSH,
5 : _7_FLUSH,
6 : _8_FLUSH, 7 : _9_FLUSH, 8 : _10_FLUSH, 9 : _JACK_FLUSH, 10 : _QUEEN_FLUSH, 11 : _KING_FLUSH, 12 : _ACE_FLUSH}
_ACE_STRAIGHT = 7463 - 1600
_KING_STRAIGHT = 7463 - 1601
_QUEEN_STRAIGHT = 7463 - 1602
_JACK_STRAIGHT = 7463 - 1603
_10_STRAIGHT = 7463 - 1604
_9_STRAIGHT = 7463 - 1605
_8_STRAIGHT = 7463 - 1606
_7_STRAIGHT = 7463 - 1607
_6_STRAIGHT = 7463 - 1608
_5_STRAIGHT = 7463 - 1609
_STRAIGHT_VALUES = { 0 : _5_STRAIGHT,
1 : _5_STRAIGHT,
2 : _5_STRAIGHT,
3 : _5_STRAIGHT,
4 : _6_STRAIGHT,
5 : _7_STRAIGHT,
6 : _8_STRAIGHT, 7 : _9_STRAIGHT, 8 : _10_STRAIGHT, 9 : _JACK_STRAIGHT, 10 : _QUEEN_STRAIGHT, 11 : _KING_STRAIGHT, 12 : _ACE_STRAIGHT}
valDict = ValueDict()
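# greedyEval2/greedyEval3 score a state as the sum over the five hands of
# log(myHandValue / oppHandValue); positive totals favour the evaluating agent.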
def greedyEval2(agentIndex, state):
agentHands = state.getHands(agentIndex)
opponentHands = state.getHands((agentIndex+1)%2)
diff = []
for i in range(5):
hand = cardsDict._HANDS[i]
myHand = eval(agentHands[hand], agentIndex, state)
oppHand = eval(opponentHands[hand], (agentIndex + 1)%2, state)
diff.append(log(float(myHand)/float(oppHand)) )
retVal = sum(x for x in diff)
return retVal
def greedyEval3(agentIndex, state):
agentHands = state.getHands(agentIndex)
opponentHands = state.getHands((agentIndex+1)%2)
diff = []
for i in range(5):
hand = cardsDict._HANDS[i]
myHand = eval(agentHands[hand], agentIndex, state)
oppHand = eval(opponentHands[hand], (agentIndex + 1)%2, state)
diff.append(log(float(myHand)/float(oppHand)) )
retVal = sum(x for x in diff)
return retVal
def fastGreedyEval2(agentIndex, state):
agentHands = state.getHands(agentIndex)
opponentHands = state.getHands((agentIndex+1)%2)
unseenCards = state.getAgentState(agentIndex).getUnseenCards().asList()
unseenArr = zeros(52, dtype = 'int')
for i in unseenCards:
unseenArr[i-1] = 1
unseenArr.shape = (13,4)
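# unseenArr is a 13x4 rank-by-suit indicator matrix of cards this agent has not
# yet seen; its row/column counts drive the flush and straight completion odds.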
unseenSize = len(unseenCards)
diff = []
for i in range(5):
hand = cardsDict._HANDS[i]
myHand = fastEval(agentHands[hand], agentIndex, state, unseenArr, unseenSize)
oppHand = fastEval(opponentHands[hand], (agentIndex + 1)%2, state, unseenArr, unseenSize)
diff.append(log(float(myHand)/float(oppHand) ) )
retVal = sum(x for x in diff)
return retVal
def fastEval(hand, agentIndex, state, unseenArr = None, unseenSize = None):
global valDict
size = len(hand)
if size == 5 :
return evalHand(hand)
ranks = tuple(cardsDict.RANK[cardsDict.INVERTED_INTVAL[card] ] for card in hand)
suits = set(cardsDict.SUIT[cardsDict.INVERTED_INTVAL[card] ] for card in hand )
suitSize = len(suits)
minVal = min(ranks)
maxRank = maxVal = max(ranks)
if maxVal == 12 and minVal < 4:
tmpRanks = sorted(ranks)
maxVal = max(tmpRanks[:-1])
minVal = -1
if suitSize == 1:
suit = suits.pop()
suitsLeft = count_nonzero(unseenArr[:, suit])
flushProb = float(suitsLeft)/float(unseenSize)
else :
flushProb = 0
if maxVal - minVal < 5 :
straightProb = getFastStraightProb(ranks, unseenArr, unseenSize, minVal, maxVal)
else:
straightProb = 0
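# Blend the partial-hand table value with the expected flush/straight values,
# weighting the latter by their completion probabilities and by the fraction
# of the hand already placed (size / 5).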
return ((1-flushProb - straightProb)*valDict.dicts[size-1][ranks] + (flushProb * _FLUSH_VALUES[maxRank] + straightProb * _STRAIGHT_VALUES[maxRank])*size/5.0)
def eval(hand, agentIndex, state) :
global valDict
size = len(hand)
if size == 5 :
return evalHand(hand)
ranks = tuple(cardsDict.RANK[cardsDict.INVERTED_INTVAL[card] ] for card in hand )
suits = set(cardsDict.SUIT[cardsDict.INVERTED_INTVAL[card] ] for card in hand )
suitSize = len(suits)
minVal = min(ranks)
maxRank = maxVal = max(ranks)
if maxVal == 12 and minVal < 4:
tmpRanks = sorted(ranks)
maxVal = max(tmpRanks[:-1])
minVal = -1
flushProb = 0
straightProb = 0
if suitSize == 1 or maxVal - minVal < 5 :
ranksSet = []
unseenCards = state.getAgentState(agentIndex).getUnseenCards().asList()
unseenArr = zeros(52, dtype = 'int')
for i in unseenCards:
unseenArr[i-1] = 1
unseenArr.shape = (13,4)
flushProb = getFlushProb(list(suits)[0], unseenArr, state, len(unseenCards), size) if suitSize == 1 else 0
for rank in ranks:
if rank not in ranksSet:
ranksSet.append(rank)
straightProb = getStraightProb(ranks, unseenArr, state, len(unseenCards), size, minVal, maxVal) if (maxVal - minVal < 5 and len(ranksSet) == size) else 0
return ((1-flushProb - straightProb)*valDict.dicts[size-1][ranks] + (flushProb * _FLUSH_VALUES[maxRank] + straightProb * _STRAIGHT_VALUES[maxRank])*size/5.0)
def getFlushProb(suit, unseenArr, state, unseenSize, handSize) :
"""
unseenCards - a 13X4 numpy matrix
"""
#N - population, K - successful cards, t - total moves in game until next time this hand is available
# s - number of draws that will be available for this hand ,k - successful draws needed
#start = time.clock()
N = unseenSize
K = count_nonzero(unseenArr[:, suit])
t = -state.data.totPlyNum%10
n = (40 - (state.data.totPlyNum + t))/2
k = 5 - handSize
return hypGeoProb(N,K,n,k)
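# A minimal standalone sketch of the hypergeometric calculation that getFlushProb
# delegates to hypGeoProb (defined elsewhere in this module). It assumes
# hypGeoProb(N, K, n, k) returns the probability of drawing at least k of the K
# remaining suited cards in n draws from the N unseen cards; scipy is used here
# purely for illustration and is not required by the functions above.
def _flushProbSketch(unseenTotal, unseenInSuit, draws, needed):
    from scipy.stats import hypergeom
    # P(X >= needed) where X ~ Hypergeometric(M=unseenTotal, n=unseenInSuit, N=draws)
    return hypergeom.sf(needed - 1, unseenTotal, unseenInSuit, draws)
# Example: two more hearts are needed, 10 of the 30 unseen cards are hearts and
# 4 draws remain: _flushProbSketch(30, 10, 4, 2) is roughly 0.41.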
def getStraightProb(ranks, unseenArr, state, unseenSize, handSize, minRank, maxRank) :
"""
ranks - tuple of ranks in hand
unseenCards - a 13X4 numpy matrix
"""
#N - population, t - total moves in game until next time this hand is available
# s - number of draws that will be available for this hand
# l - number of cards needed to complete hand ( 0<=l<=4 )
N = unseenSize
t = -state.data.totPlyNum%10
s = (40 - (state.data.totPlyNum + t))/2
l = 5 - handSize
leftCardsLists = []
for lowRank in range(max(-1, maxRank - 4) , minRank + 1):
highRank = min(12, lowRank + 4)
if highRank < maxRank or lowRank > highRank - 4 : continue
tmpLeftCardsList = []
for rank in range(lowRank, highRank + 1 ):
tmpRank = rank if rank != -1 else 12
if tmpRank not in ranks:
tmpLeftCardsList.append(count_nonzero(unseenArr[rank, :]) )
leftCardsLists.append(tmpLeftCardsList)
tmpSum = 0.0
for leftCardsList in leftCardsLists :
tmpSum += multiHypGeoProb(N, leftCardsList, s, l)
return tmpSum
def getFastStraightProb(ranks, unseenArr, unseenSize, minRank, maxRank) :
tmpSum = 0.0
for lowRank in range(max(-1, maxRank - 4) , minRank + 1):
highRank = min(12, lowRank + 4)
if highRank < maxRank or lowRank > highRank - 4 : continue
tmpLeftCardsList = []
for rank in range(lowRank, highRank + 1 ):
tmpRank = rank if rank != -1 else 12
if tmpRank not in ranks:
tmpLeftCardsList.append(count_nonzero(unseenArr[rank, :])/float(unseenSize) )
tmpSum += reduce(mul, tmpLeftCardsList)
return tmpSum
|
|
import locale
import os
import os.path
import re
from contextlib import suppress
from copy import copy
from datetime import date, datetime, time
from typing import TYPE_CHECKING, Any, AnyStr, List, Mapping, Optional, Type, Union, cast
from unicodedata import normalize
import jinja2.filters
from dateutil import parser as dateutil_parse
from jinja2 import (
ChoiceLoader,
Environment,
FileSystemLoader,
PackageLoader,
StrictUndefined,
Template,
TemplateNotFound,
TemplateSyntaxError,
)
from jinja2.nativetypes import NativeTemplate
from loguru import logger
from flexget.event import event
from flexget.utils.lazy_dict import LazyDict
from flexget.utils.pathscrub import pathscrub
from flexget.utils.tools import split_title_year
if TYPE_CHECKING:
from flexget.entry import Entry
from flexget.manager import Manager
from flexget.task import Task
logger = logger.bind(name='utils.template')
# The environment will be created after the manager has started
environment: Optional['FlexGetEnvironment'] = None
class RenderError(Exception):
"""Error raised when there is a problem with jinja rendering."""
def filter_pathbase(val: Optional[str]) -> str:
"""Base name of a path."""
return os.path.basename(val or '')
def filter_pathname(val: Optional[str]) -> str:
"""Base name of a path, without its extension."""
return os.path.splitext(os.path.basename(val or ''))[0]
def filter_pathext(val: Optional[str]) -> str:
"""Extension of a path (including the '.')."""
return os.path.splitext(val or '')[1]
def filter_pathdir(val: Optional[str]) -> str:
"""Directory containing the given path."""
return os.path.dirname(val or '')
def filter_pathscrub(val: str, os_mode: str = None) -> str:
"""Replace problematic characters in a path."""
if not isinstance(val, str):
return val
return pathscrub(val, os_mode)
def filter_re_replace(val: AnyStr, pattern: str, repl: str) -> str:
"""Perform a regexp replacement on the given string."""
return re.sub(pattern, repl, str(val))
def filter_re_search(val, pattern: str):
"""Perform a search for given regexp pattern, return the matching portion of the text."""
if not isinstance(val, str):
return val
result = re.search(pattern, val, re.IGNORECASE)
if result:
return result.group(0)
return ''
def filter_formatdate(val, format_str):
"""Returns a string representation of a datetime object according to format string."""
encoding = locale.getpreferredencoding()
if not isinstance(val, (datetime, date, time)):
return val
return val.strftime(format_str)
def filter_parsedate(val):
"""Attempts to parse a date according to the rules in ISO 8601 and RFC 2822"""
return dateutil_parse.parse(val)
def filter_date_suffix(date_str: str):
"""Returns a date suffix for a given date"""
day = int(date_str[-2:])
if 4 <= day <= 20 or 24 <= day <= 30:
suffix = "th"
else:
suffix = ["st", "nd", "rd"][day % 10 - 1]
return date_str + suffix
def filter_format_number(val, places: int = None, grouping: bool = True) -> str:
"""Formats a number according to the user's locale."""
if not isinstance(val, (int, float)):
return val
if places is not None:
format_str = f'%.{places}f'
elif isinstance(val, int):
format_str = '%d'
else:
format_str = '%.02f'
locale.setlocale(locale.LC_ALL, '')
return locale.format_string(format_str, val, grouping)
def filter_pad(val: Union[int, str], width: int, fillchar: str = '0') -> str:
"""Pads a number or string with fillchar to the specified width."""
return str(val).rjust(width, fillchar)
def filter_to_date(date_time_val):
"""Returns the date from any date-time object"""
if not isinstance(date_time_val, (datetime, date, time)):
return date_time_val
return date_time_val.date()
def filter_default(value, default_value: str = '', boolean: bool = True) -> str:
"""Override the built-in Jinja default filter to set the `boolean` param to True by default"""
return jinja2.filters.do_default(value, default_value, boolean)
filter_d = filter_default
def filter_asciify(text: str) -> str:
"""Siplify text"""
if not isinstance(text, str):
return text
result = normalize('NFD', text)
result = result.encode('ascii', 'ignore')
result = result.decode("utf-8")
result = str(result)
return result
def filter_strip_symbols(text: str) -> str:
"""Strip Symbols text"""
if not isinstance(text, str):
return text
# Symbols that should be converted to white space
result = re.sub(r'[ \(\)\-_\[\]\.]+', ' ', text)
# Leftovers
result = re.sub(r"[^\w\d\s]", "", result, flags=re.UNICODE)
# Replace multiple white spaces with one
result = ' '.join(result.split())
return result
def filter_strip_year(name: str) -> str:
return split_title_year(name).title
def filter_get_year(name: str) -> str:
return split_title_year(name).year
def is_fs_file(pathname: Union[str, os.PathLike]) -> bool:
"""Test whether item is existing file in filesystem"""
return os.path.isfile(pathname)
def is_fs_dir(pathname: Union[str, os.PathLike]) -> bool:
"""Test whether item is existing directory in filesystem"""
return os.path.isdir(pathname)
def is_fs_link(pathname: Union[str, os.PathLike]) -> bool:
"""Test whether item is existing link in filesystem"""
return os.path.islink(pathname)
class FlexGetTemplate(Template):
"""Adds lazy lookup support when rendering templates."""
def new_context(self, vars=None, shared=False, locals=None):
context = super().new_context(vars, shared, locals)
context.parent = LazyDict(context.parent)
return context
class FlexGetNativeTemplate(FlexGetTemplate, NativeTemplate):
"""Lazy lookup support and native python return types."""
class FlexGetEnvironment(Environment):
"""Environment with template_class support"""
template_class: Type[FlexGetTemplate]
@event('manager.initialize')
def make_environment(manager: 'Manager') -> None:
"""Create our environment and add our custom filters"""
global environment
environment = FlexGetEnvironment(
undefined=StrictUndefined,
loader=ChoiceLoader(
[
PackageLoader('flexget'),
FileSystemLoader(os.path.join(manager.config_base, 'templates')),
]
),
extensions=['jinja2.ext.loopcontrols'],
)
environment.template_class = FlexGetTemplate
for name, filt in list(globals().items()):
if name.startswith('filter_'):
environment.filters[name.split('_', 1)[1]] = filt
for name, test in list(globals().items()):
if name.startswith('is_'):
environment.tests[name.split('_', 1)[1]] = test
def list_templates(extensions: List[str] = None) -> List[str]:
"""Returns all templates names that are configured under environment loader dirs"""
if environment is None or not hasattr(environment, 'loader'):
return []
return environment.list_templates(extensions=extensions)
def get_filters() -> dict:
"""Returns all built-in and custom Jinja filters in a dict
The key is the name, and the value is the filter func
"""
if environment is None or not hasattr(environment, 'loader'):
return {}
return environment.filters
def get_template(template_name: str, scope: Optional[str] = 'task') -> FlexGetTemplate:
"""Loads a template from disk. Looks in both included plugins and users custom scope dir."""
if not template_name.endswith('.template'):
template_name += '.template'
locations = []
if scope:
locations.append(scope + '/' + template_name)
locations.append(template_name)
for location in locations:
if environment is not None:
with suppress(TemplateNotFound):
return cast(FlexGetTemplate, environment.get_template(location))
else:
err = f'Template not found in templates dir: {template_name}'
if scope:
err += f' ({scope})'
raise ValueError(err)
def render(template: Union[FlexGetTemplate, str], context: Mapping, native: bool = False) -> str:
"""
Renders a Template with `context` as its context.
:param template: Template or template string to render.
:param context: Context to render the template from.
    :param native: If True, the rendering result may be native python types rather than just strings.
:return: The rendered template text.
"""
if isinstance(template, str) and environment is not None:
template_class = None
if native:
template_class = FlexGetNativeTemplate
try:
template = cast(
FlexGetTemplate, environment.from_string(template, template_class=template_class)
)
except TemplateSyntaxError as e:
raise RenderError(f'Error in template syntax: {e.message}')
try:
template = cast(FlexGetTemplate, template)
result = template.render(context)
except Exception as e:
error = RenderError(f'({type(e).__name__}) {e}')
logger.debug(f'Error during rendering: {error}')
raise error
return result
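# A minimal, hypothetical usage sketch of render(). It assumes make_environment
# has already run (normally via the 'manager.initialize' event); the field names
# below are made up for illustration only.
#
#     render('{{ series_name }} - {{ quality | upper }}',
#            {'series_name': 'Some Show', 'quality': 'hdtv'})
#     # -> 'Some Show - HDTV'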
def render_from_entry(
template: Union[FlexGetTemplate, str], entry: 'Entry', native: bool = False
) -> str:
"""Renders a Template or template string with an Entry as its context."""
# Make a copy of the Entry so we can add some more fields
variables = copy(entry.store)
variables['now'] = datetime.now()
    # Add task name to variables; it is usually there because of the metainfo_task plugin, but not always
if hasattr(entry, 'task') and entry.task is not None:
if 'task' not in variables:
variables['task'] = entry.task.name
        # Since `task` has a different meaning in entry scope and task scope, the `task_name` field is
        # created to be consistent
variables['task_name'] = entry.task.name
return render(template, variables, native=native)
def render_from_task(template: Union[FlexGetTemplate, str], task: 'Task') -> str:
"""
Renders a Template with a task as its context.
:param template: Template or template string to render.
:param task: Task to render the template from.
:return: The rendered template text.
"""
variables = {'task': task, 'now': datetime.now(), 'task_name': task.name}
return render(template, variables)
def evaluate_expression(expression: str, context: Mapping) -> Any:
"""
Evaluate a jinja `expression` using a given `context` with support for `LazyDict`s (`Entry`s.)
:param str expression: A jinja expression to evaluate
:param context: dictlike, supporting LazyDicts
"""
if environment is not None:
compiled_expr = environment.compile_expression(expression)
# If we have a LazyDict, grab the underlying store. Our environment supports LazyFields directly
if isinstance(context, LazyDict):
context = context.store
return compiled_expr(**context)
return None
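# A minimal, illustrative sketch of a few of the filters defined above. They are
# plain functions, so they can be exercised directly; render() and
# evaluate_expression() additionally require the manager-created environment.
def _filter_usage_sketch() -> None:
    assert filter_pad(7, 3) == '007'
    assert filter_date_suffix('2012-02-01') == '2012-02-01st'
    assert filter_strip_symbols('Some.Show_[2012]') == 'Some Show 2012'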
|
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services PVT. LTD.
# (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# ---------------------------------------------------------------------------
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import except_orm, ValidationError
from dateutil.relativedelta import relativedelta
from openerp import models, fields, api, _
import datetime
import time
class HotelFolio(models.Model):
_inherit = 'hotel.folio'
_order = 'reservation_id desc'
reservation_id = fields.Many2one(comodel_name='hotel.reservation',
string='Reservation Id')
@api.multi
def write(self, vals):
"""
Overrides orm write method.
@param self: The object pointer
@param vals: dictionary of fields value.
"""
folio_write = super(HotelFolio, self).write(vals)
reservation_line_obj = self.env['hotel.room.reservation.line']
for folio_obj in self:
if folio_obj.reservation_id:
for reservation in folio_obj.reservation_id:
reservation_obj = (reservation_line_obj.search
([('reservation_id', '=',
reservation.id)]))
if len(reservation_obj) == 1:
for line_id in reservation.reservation_line:
line_id = line_id.reserve
for room_id in line_id:
vals = {'room_id': room_id.id,
'check_in': folio_obj.checkin_date,
'check_out': folio_obj.checkout_date,
'state': 'assigned',
'reservation_id': reservation.id,
}
reservation_obj.write(vals)
return folio_write
class HotelReservation(models.Model):
_name = "hotel.reservation"
_rec_name = "reservation_no"
_description = "Reservation"
_order = 'reservation_no desc'
_inherit = ['mail.thread', 'ir.needaction_mixin']
reservation_no = fields.Char('Reservation No', size=64, readonly=True)
date_order = fields.Datetime('Date Ordered', required=True, readonly=True,
states={'draft': [('readonly', False)]},
default=(lambda *a:
time.strftime
(DEFAULT_SERVER_DATETIME_FORMAT)))
warehouse_id = fields.Many2one('stock.warehouse', 'Hotel', readonly=True,
required=True, default=1,
states={'draft': [('readonly', False)]})
partner_id = fields.Many2one('res.partner', 'Guest Name', readonly=True,
required=True,
states={'draft': [('readonly', False)]})
pricelist_id = fields.Many2one('product.pricelist', 'Scheme',
required=True, readonly=True,
states={'draft': [('readonly', False)]},
help="Pricelist for current reservation.")
partner_invoice_id = fields.Many2one('res.partner', 'Invoice Address',
readonly=True,
states={'draft':
[('readonly', False)]},
help="Invoice address for "
"current reservation.")
partner_order_id = fields.Many2one('res.partner', 'Ordering Contact',
readonly=True,
states={'draft':
[('readonly', False)]},
help="The name and address of the "
"contact that requested the order "
"or quotation.")
partner_shipping_id = fields.Many2one('res.partner', 'Delivery Address',
readonly=True,
states={'draft':
[('readonly', False)]},
help="Delivery address"
"for current reservation. ")
checkin = fields.Datetime('Expected-Date-Arrival', required=True,
readonly=True,
states={'draft': [('readonly', False)]})
checkout = fields.Datetime('Expected-Date-Departure', required=True,
readonly=True,
states={'draft': [('readonly', False)]})
adults = fields.Integer('Adults', size=64, readonly=True,
states={'draft': [('readonly', False)]},
                            help='Number of adults in the guest list.')
children = fields.Integer('Children', size=64, readonly=True,
states={'draft': [('readonly', False)]},
                              help='Number of children in the guest list.')
reservation_line = fields.One2many('hotel_reservation.line', 'line_id',
'Reservation Line',
help='Hotel room reservation details.')
state = fields.Selection([('draft', 'Draft'), ('confirm', 'Confirm'),
('cancel', 'Cancel'), ('done', 'Done')],
'State', readonly=True,
default=lambda *a: 'draft')
folio_id = fields.Many2many('hotel.folio', 'hotel_folio_reservation_rel',
'order_id', 'invoice_id', string='Folio')
dummy = fields.Datetime('Dummy')
@api.constrains('reservation_line', 'adults', 'children')
def check_reservation_rooms(self):
'''
This method is used to validate the reservation_line.
-----------------------------------------------------
@param self: object pointer
@return: raise a warning depending on the validation
'''
for reservation in self:
if len(reservation.reservation_line) == 0:
raise ValidationError(_('Please Select Rooms \
For Reservation.'))
for rec in reservation.reservation_line:
if len(rec.reserve) == 0:
raise ValidationError(_('Please Select Rooms \
For Reservation.'))
cap = 0
for room in rec.reserve:
cap += room.capacity
if (self.adults + self.children) > cap:
raise ValidationError(_('Room Capacity \
Exceeded \n Please Select Rooms According to \
                        Members Accommodation.'))
@api.model
def _needaction_count(self, domain=None):
"""
Show a count of draft state reservations on the menu badge.
"""
return self.search_count([('state', '=', 'draft')])
# @api.onchange('date_order', 'checkin')
# def on_change_checkin(self):
# '''
# When you change date_order or checkin it will check whether
# Checkin date should be greater than the current date
# ------------------------------------------------------------
# @param self: object pointer
# @return: raise warning depending on the validation
# '''
# if self.date_order and self.checkin:
# if self.checkin < self.date_order:
# raise except_orm(_('Warning'), _('Checkin date should be \
# greater than the current date.'))
@api.constrains('checkin', 'checkout')
def check_in_out_dates(self):
"""
        The Checkin date must not be earlier than the order date, and the
        Checkout date must not be earlier than the Checkin date.
"""
if self.checkout and self.checkin:
if self.checkin < self.date_order:
raise except_orm(_('Warning'), _('Checkin date should be \
greater than the current date.'))
if self.checkout < self.checkin:
raise except_orm(_('Warning'), _('Checkout date \
should be greater than Checkin date.'))
@api.onchange('checkout', 'checkin')
def on_change_checkout(self):
'''
When you change checkout or checkin it will check whether
Checkout date should be greater than Checkin date
and update dummy field
-----------------------------------------------------------
@param self: object pointer
@return: raise warning depending on the validation
'''
checkout_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
checkin_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if not (checkout_date and checkin_date):
return {'value': {}}
delta = datetime.timedelta(days=1)
dat_a = time.strptime(checkout_date,
DEFAULT_SERVER_DATETIME_FORMAT)[:5]
addDays = datetime.datetime(*dat_a) + delta
self.dummy = addDays.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
@api.onchange('partner_id')
def onchange_partner_id(self):
'''
When you change partner_id it will update the partner_invoice_id,
partner_shipping_id and pricelist_id of the hotel reservation as well
---------------------------------------------------------------------
@param self: object pointer
'''
if not self.partner_id:
self.partner_invoice_id = False
self.partner_shipping_id = False
self.partner_order_id = False
else:
addr = self.partner_id.address_get(['delivery', 'invoice',
'contact'])
self.partner_invoice_id = addr['invoice']
self.partner_order_id = addr['contact']
self.partner_shipping_id = addr['delivery']
self.pricelist_id = self.partner_id.property_product_pricelist.id
@api.multi
def confirmed_reservation(self):
"""
        This method creates a new recordset for the hotel room reservation line
------------------------------------------------------------------
@param self: The object pointer
@return: new record set for hotel room reservation line.
"""
reservation_line_obj = self.env['hotel.room.reservation.line']
for reservation in self:
self._cr.execute("select count(*) from hotel_reservation as hr "
"inner join hotel_reservation_line as hrl on \
hrl.line_id = hr.id "
"inner join hotel_reservation_line_room_rel as \
hrlrr on hrlrr.room_id = hrl.id "
"where (checkin,checkout) overlaps \
( timestamp %s, timestamp %s ) "
"and hr.id <> cast(%s as integer) "
"and hr.state = 'confirm' "
"and hrlrr.hotel_reservation_line_id in ("
"select hrlrr.hotel_reservation_line_id \
from hotel_reservation as hr "
"inner join hotel_reservation_line as \
hrl on hrl.line_id = hr.id "
"inner join hotel_reservation_line_room_rel \
as hrlrr on hrlrr.room_id = hrl.id "
"where hr.id = cast(%s as integer) )",
(reservation.checkin, reservation.checkout,
str(reservation.id), str(reservation.id)))
res = self._cr.fetchone()
roomcount = res and res[0] or 0.0
if roomcount:
                raise except_orm(_('Warning'), _('You tried to confirm a \
                    reservation with rooms that are already reserved in this \
                    reservation period'))
else:
self.write({'state': 'confirm'})
for line_id in reservation.reservation_line:
line_id = line_id.reserve
for room_id in line_id:
vals = {
'room_id': room_id.id,
'check_in': reservation.checkin,
'check_out': reservation.checkout,
'state': 'assigned',
'reservation_id': reservation.id,
}
room_id.write({'isroom': False, 'status': 'occupied'})
reservation_line_obj.create(vals)
return True
@api.multi
def send_reservation_maill(self):
'''
        This function opens a window to compose an email, with the
        template message loaded by default.
@param self: object pointer
'''
assert len(self._ids) == 1, 'This is for a single id at a time.'
ir_model_data = self.env['ir.model.data']
try:
template_id = (ir_model_data.get_object_reference
('hotel_reservation',
'mail_template_hotel_reservation')[1])
except ValueError:
template_id = False
try:
compose_form_id = (ir_model_data.get_object_reference
('mail',
'email_compose_message_wizard_form')[1])
except ValueError:
compose_form_id = False
ctx = dict()
ctx.update({
'default_model': 'hotel.reservation',
'default_res_id': self._ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'force_send': True,
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
'force_send': True
}
@api.model
def reservation_reminder_24hrs(self):
"""
        This method is called by the scheduler:
        once a day the scheduler calls this method to
        find all of tomorrow's reservations.
----------------------------------------------
@param self: The object pointer
@return: send a mail
"""
now_str = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
now_date = datetime.datetime.strptime(now_str,
DEFAULT_SERVER_DATETIME_FORMAT)
ir_model_data = self.env['ir.model.data']
template_id = (ir_model_data.get_object_reference
('hotel_reservation',
'mail_template_reservation_reminder_24hrs')[1])
template_rec = self.env['mail.template'].browse(template_id)
for travel_rec in self.search([]):
checkin_date = (datetime.datetime.strptime
(travel_rec.checkin,
DEFAULT_SERVER_DATETIME_FORMAT))
difference = relativedelta(now_date, checkin_date)
if(difference.days == -1 and travel_rec.partner_id.email and
travel_rec.state == 'confirm'):
template_rec.send_mail(travel_rec.id, force_send=True)
return True
@api.multi
def _create_folio(self):
"""
        This method creates a new hotel folio.
-----------------------------------------
@param self: The object pointer
@return: new record set for hotel folio.
"""
hotel_folio_obj = self.env['hotel.folio']
room_obj = self.env['hotel.room']
for reservation in self:
folio_lines = []
checkin_date = reservation['checkin']
checkout_date = reservation['checkout']
if not self.checkin < self.checkout:
raise except_orm(_('Error'),
_('Checkout date should be greater \
than the Checkin date.'))
duration_vals = (self.onchange_check_dates
(checkin_date=checkin_date,
checkout_date=checkout_date, duration=False))
duration = duration_vals.get('duration') or 0.0
folio_vals = {
'date_order': reservation.date_order,
'warehouse_id': reservation.warehouse_id.id,
'partner_id': reservation.partner_id.id,
'pricelist_id': reservation.pricelist_id.id,
'partner_invoice_id': reservation.partner_invoice_id.id,
'partner_shipping_id': reservation.partner_shipping_id.id,
'checkin_date': reservation.checkin,
'checkout_date': reservation.checkout,
'duration': duration,
'reservation_id': reservation.id,
'service_lines': reservation['folio_id']
}
date_a = (datetime.datetime
(*time.strptime(reservation['checkout'],
DEFAULT_SERVER_DATETIME_FORMAT)[:5]))
date_b = (datetime.datetime
(*time.strptime(reservation['checkin'],
DEFAULT_SERVER_DATETIME_FORMAT)[:5]))
for line in reservation.reservation_line:
for r in line.reserve:
folio_lines.append((0, 0, {
'checkin_date': checkin_date,
'checkout_date': checkout_date,
'product_id': r.product_id and r.product_id.id,
'name': reservation['reservation_no'],
'product_uom': r['uom_id'].id,
'price_unit': r['lst_price'],
'product_uom_qty': ((date_a - date_b).days) + 1
}))
res_obj = room_obj.browse([r.id])
res_obj.write({'status': 'occupied', 'isroom': False})
folio_vals.update({'room_lines': folio_lines})
folio = hotel_folio_obj.create(folio_vals)
self._cr.execute('insert into hotel_folio_reservation_rel'
'(order_id, invoice_id) values (%s,%s)',
(reservation.id, folio.id)
)
reservation.write({'state': 'done'})
return True
@api.multi
def onchange_check_dates(self, checkin_date=False, checkout_date=False,
duration=False):
'''
        This method gives the duration between checkin and checkout. A stay
        of only a few hours still counts as a whole day. If the hours beyond
        whole days are greater than or equal to the additional hours
        configured on the company, one more day is counted (see the
        standalone sketch after this class).
        --------------------------------------------------------------------
        @param self: object pointer
        @return: dictionary containing the computed duration
'''
value = {}
company_obj = self.env['res.company']
configured_addition_hours = 0
company_ids = company_obj.search([])
if company_ids.ids:
configured_addition_hours = company_ids[0].additional_hours
duration = 0
if checkin_date and checkout_date:
chkin_dt = (datetime.datetime.strptime
(checkin_date, DEFAULT_SERVER_DATETIME_FORMAT))
chkout_dt = (datetime.datetime.strptime
(checkout_date, DEFAULT_SERVER_DATETIME_FORMAT))
dur = chkout_dt - chkin_dt
duration = dur.days + 1
if configured_addition_hours > 0:
additional_hours = abs((dur.seconds / 60) / 60)
if additional_hours >= configured_addition_hours:
duration += 1
value.update({'duration': duration})
return value
@api.model
def create(self, vals):
"""
Overrides orm create method.
@param self: The object pointer
@param vals: dictionary of fields value.
"""
if not vals:
vals = {}
if self._context is None:
self._context = {}
seq_obj = self.env['ir.sequence']
hotel_reserve = seq_obj.next_by_code('hotel.reservation') or 'New'
vals['reservation_no'] = hotel_reserve
return super(HotelReservation, self).create(vals)
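# A minimal, standalone sketch (not part of the models above) of the duration
# rule implemented in HotelReservation.onchange_check_dates: a stay always
# counts as days + 1, and one extra day is added when the leftover hours reach
# the additional hours configured on the company.
def _duration_rule_sketch(checkin_str, checkout_str, configured_addition_hours=0):
    chkin_dt = datetime.datetime.strptime(checkin_str,
                                          DEFAULT_SERVER_DATETIME_FORMAT)
    chkout_dt = datetime.datetime.strptime(checkout_str,
                                           DEFAULT_SERVER_DATETIME_FORMAT)
    dur = chkout_dt - chkin_dt
    duration = dur.days + 1
    if configured_addition_hours > 0:
        additional_hours = (dur.seconds / 60) / 60
        if additional_hours >= configured_addition_hours:
            duration += 1
    return duration
# Example: a stay of 2 days and 5 hours counts as 4 days when the company
# allows only 3 additional hours:
# _duration_rule_sketch('2016-01-01 12:00:00', '2016-01-03 17:00:00', 3) == 4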
class HotelReservationLine(models.Model):
_name = "hotel_reservation.line"
_description = "Reservation Line"
name = fields.Char('Name', size=64)
line_id = fields.Many2one('hotel.reservation')
reserve = fields.Many2many('hotel.room',
'hotel_reservation_line_room_rel',
'room_id', 'hotel_reservation_line_id',
domain="[('isroom','=',True),\
('categ_id','=',categ_id)]")
categ_id = fields.Many2one('product.category', 'Room Type',
domain="[('isroomtype','=',True)]",
change_default=True)
@api.onchange('categ_id')
def on_change_categ(self):
'''
When you change categ_id it check checkin and checkout are
filled or not if not then raise warning
-----------------------------------------------------------
@param self: object pointer
'''
hotel_room_obj = self.env['hotel.room']
hotel_room_ids = hotel_room_obj.search([('categ_id', '=',
self.categ_id.id),
('isroom', '=', True)])
assigned = False
room_ids = []
if not self.line_id.checkin:
raise except_orm(_('Warning'),
_('Before choosing a room,\n You have to select \
a Check in date or a Check out date in \
the reservation form.'))
for room in hotel_room_ids:
assigned = False
for line in room.room_reservation_line_ids:
if(line.check_in >= self.line_id.checkin and
line.check_in <= self.line_id.checkout or
line.check_out <= self.line_id.checkout and
line.check_out >= self.line_id.checkin):
assigned = True
if not assigned:
room_ids.append(room.id)
domain = {'reserve': [('id', 'in', room_ids)]}
return {'domain': domain}
@api.multi
def unlink(self):
"""
Overrides orm unlink method.
@param self: The object pointer
@return: True/False.
"""
hotel_room_reserv_line_obj = self.env['hotel.room.reservation.line']
for reserv_rec in self:
for rec in reserv_rec.reserve:
hres_arg = [('room_id', '=', rec.id),
('reservation_id', '=', reserv_rec.line_id.id)]
myobj = hotel_room_reserv_line_obj.search(hres_arg)
if myobj.ids:
rec.write({'isroom': True, 'status': 'available'})
myobj.unlink()
return super(HotelReservationLine, self).unlink()
class HotelRoomReservationLine(models.Model):
_name = 'hotel.room.reservation.line'
_description = 'Hotel Room Reservation'
_rec_name = 'room_id'
room_id = fields.Many2one(comodel_name='hotel.room', string='Room id')
check_in = fields.Datetime('Check In Date', required=True)
check_out = fields.Datetime('Check Out Date', required=True)
state = fields.Selection([('assigned', 'Assigned'),
('unassigned', 'Unassigned')], 'Room Status')
reservation_id = fields.Many2one('hotel.reservation',
string='Reservation')
status = fields.Selection(string='state', related='reservation_id.state')
class HotelRoom(models.Model):
_inherit = 'hotel.room'
_description = 'Hotel Room'
room_reservation_line_ids = fields.One2many('hotel.room.reservation.line',
'room_id',
string='Room Reserv Line')
@api.model
def cron_room_line(self):
"""
        This method is called by the scheduler:
        every minute the scheduler calls this method to check whether the
        status of each room is occupied or available
--------------------------------------------------------------
@param self: The object pointer
@return: update status of hotel room reservation line
"""
reservation_line_obj = self.env['hotel.room.reservation.line']
folio_room_line_obj = self.env['folio.room.line']
now = datetime.datetime.now()
curr_date = now.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
for room in self.search([]):
reserv_line_ids = [reservation_line.ids for
reservation_line in
room.room_reservation_line_ids]
reserv_args = [('id', 'in', reserv_line_ids),
('check_in', '<=', curr_date),
('check_out', '>=', curr_date)]
reservation_line_ids = reservation_line_obj.search(reserv_args)
rooms_ids = [room_line.ids for room_line in room.room_line_ids]
rom_args = [('id', 'in', rooms_ids),
('check_in', '<=', curr_date),
('check_out', '>=', curr_date)]
room_line_ids = folio_room_line_obj.search(rom_args)
status = {'isroom': True, 'color': 5}
if reservation_line_ids.ids:
status = {'isroom': False, 'color': 2}
room.write(status)
if room_line_ids.ids:
status = {'isroom': False, 'color': 2}
room.write(status)
if reservation_line_ids.ids and room_line_ids.ids:
raise except_orm(_('Wrong Entry'),
_('Please Check Rooms Status \
for %s.' % (room.name)))
return True
class RoomReservationSummary(models.Model):
_name = 'room.reservation.summary'
_description = 'Room reservation summary'
date_from = fields.Datetime('Date From')
date_to = fields.Datetime('Date To')
summary_header = fields.Text('Summary Header')
room_summary = fields.Text('Room Summary')
@api.model
def default_get(self, fields):
"""
To get default values for the object.
@param self: The object pointer.
@param fields: List of fields for which we want default values
        @return: A dictionary of fields with their default values.
"""
if self._context is None:
self._context = {}
res = super(RoomReservationSummary, self).default_get(fields)
if not self.date_from and self.date_to:
date_today = datetime.datetime.today()
first_day = datetime.datetime(date_today.year,
date_today.month, 1, 0, 0, 0)
first_temp_day = first_day + relativedelta(months=1)
last_temp_day = first_temp_day - relativedelta(days=1)
last_day = datetime.datetime(last_temp_day.year,
last_temp_day.month,
last_temp_day.day, 23, 59, 59)
date_froms = first_day.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
date_ends = last_day.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
res.update({'date_from': date_froms, 'date_to': date_ends})
return res
@api.multi
def room_reservation(self):
'''
@param self: object pointer
'''
mod_obj = self.env['ir.model.data']
if self._context is None:
self._context = {}
model_data_ids = mod_obj.search([('model', '=', 'ir.ui.view'),
('name', '=',
'view_hotel_reservation_form')])
resource_id = model_data_ids.read(fields=['res_id'])[0]['res_id']
return {'name': _('Reconcile Write-Off'),
'context': self._context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hotel.reservation',
'views': [(resource_id, 'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
@api.onchange('date_from', 'date_to')
def get_room_summary(self):
'''
@param self: object pointer
'''
res = {}
all_detail = []
room_obj = self.env['hotel.room']
reservation_line_obj = self.env['hotel.room.reservation.line']
date_range_list = []
main_header = []
summary_header_list = ['Rooms']
if self.date_from and self.date_to:
if self.date_from > self.date_to:
raise except_orm(_('User Error!'),
                                 _('Please check the time period: Date \
                                 From can\'t be greater than Date To!'))
d_frm_obj = (datetime.datetime.strptime
(self.date_from, DEFAULT_SERVER_DATETIME_FORMAT))
d_to_obj = (datetime.datetime.strptime
(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT))
temp_date = d_frm_obj
while(temp_date <= d_to_obj):
val = ''
val = (str(temp_date.strftime("%a")) + ' ' +
str(temp_date.strftime("%b")) + ' ' +
str(temp_date.strftime("%d")))
summary_header_list.append(val)
date_range_list.append(temp_date.strftime
(DEFAULT_SERVER_DATETIME_FORMAT))
temp_date = temp_date + datetime.timedelta(days=1)
all_detail.append(summary_header_list)
room_ids = room_obj.search([])
all_room_detail = []
for room in room_ids:
room_detail = {}
room_list_stats = []
room_detail.update({'name': room.name or ''})
if not room.room_reservation_line_ids:
for chk_date in date_range_list:
room_list_stats.append({'state': 'Free',
'date': chk_date})
else:
for chk_date in date_range_list:
for room_res_line in room.room_reservation_line_ids:
reservline_ids = [i.ids for i in
room.room_reservation_line_ids]
reservline_ids = (reservation_line_obj.search
([('id', 'in', reservline_ids),
('check_in', '<=', chk_date),
('check_out', '>=', chk_date)
]))
if reservline_ids:
room_list_stats.append({'state': 'Reserved',
'date': chk_date,
'room_id': room.id})
break
else:
room_list_stats.append({'state': 'Free',
'date': chk_date,
'room_id': room.id})
break
room_detail.update({'value': room_list_stats})
all_room_detail.append(room_detail)
main_header.append({'header': summary_header_list})
self.summary_header = str(main_header)
self.room_summary = str(all_room_detail)
return res
class QuickRoomReservation(models.TransientModel):
_name = 'quick.room.reservation'
_description = 'Quick Room Reservation'
partner_id = fields.Many2one('res.partner', string="Customer",
required=True)
check_in = fields.Datetime('Check In', required=True)
check_out = fields.Datetime('Check Out', required=True)
room_id = fields.Many2one('hotel.room', 'Room', required=True)
warehouse_id = fields.Many2one('stock.warehouse', 'Hotel', required=True)
pricelist_id = fields.Many2one('product.pricelist', 'pricelist',
required=True)
partner_invoice_id = fields.Many2one('res.partner', 'Invoice Address',
required=True)
partner_order_id = fields.Many2one('res.partner', 'Ordering Contact',
required=True)
partner_shipping_id = fields.Many2one('res.partner', 'Delivery Address',
required=True)
@api.onchange('check_out', 'check_in')
def on_change_check_out(self):
'''
When you change checkout or checkin it will check whether
Checkout date should be greater than Checkin date
and update dummy field
-----------------------------------------------------------
@param self: object pointer
@return: raise warning depending on the validation
'''
if self.check_out and self.check_in:
if self.check_out < self.check_in:
raise except_orm(_('Warning'),
_('Checkout date should be greater \
than Checkin date.'))
@api.onchange('partner_id')
def onchange_partner_id_res(self):
'''
When you change partner_id it will update the partner_invoice_id,
partner_shipping_id and pricelist_id of the hotel reservation as well
---------------------------------------------------------------------
@param self: object pointer
'''
if not self.partner_id:
self.partner_invoice_id = False
self.partner_shipping_id = False
self.partner_order_id = False
else:
addr = self.partner_id.address_get(['delivery', 'invoice',
'contact'])
self.partner_invoice_id = addr['invoice']
self.partner_order_id = addr['contact']
self.partner_shipping_id = addr['delivery']
self.pricelist_id = self.partner_id.property_product_pricelist.id
@api.model
def default_get(self, fields):
"""
To get default values for the object.
@param self: The object pointer.
@param fields: List of fields for which we want default values
        @return: A dictionary of fields with their default values.
"""
if self._context is None:
self._context = {}
res = super(QuickRoomReservation, self).default_get(fields)
if self._context:
keys = self._context.keys()
if 'date' in keys:
res.update({'check_in': self._context['date']})
if 'room_id' in keys:
roomid = self._context['room_id']
res.update({'room_id': int(roomid)})
return res
@api.multi
def room_reserve(self):
"""
        This method creates a new record for hotel.reservation
-----------------------------------------------------
@param self: The object pointer
@return: new record set for hotel reservation.
"""
hotel_res_obj = self.env['hotel.reservation']
for res in self:
(hotel_res_obj.create
({'partner_id': res.partner_id.id,
'partner_invoice_id': res.partner_invoice_id.id,
'partner_order_id': res.partner_order_id.id,
'partner_shipping_id': res.partner_shipping_id.id,
'checkin': res.check_in,
'checkout': res.check_out,
'warehouse_id': res.warehouse_id.id,
'pricelist_id': res.pricelist_id.id,
'reservation_line': [(0, 0,
{'reserve': [(6, 0, [res.room_id.id])],
'name': (res.room_id and
res.room_id.name or '')
})]
}))
return True
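# A minimal, standalone sketch (not part of the models above) of the interval
# test that HotelReservation.confirmed_reservation expresses in SQL with
# "(checkin, checkout) OVERLAPS (timestamp %s, timestamp %s)": two periods
# overlap when each one starts before the other ends.
def _periods_overlap_sketch(checkin_a, checkout_a, checkin_b, checkout_b):
    return checkin_a < checkout_b and checkin_b < checkout_a
# Example with ISO date strings (lexicographic order matches date order):
# _periods_overlap_sketch('2016-01-10', '2016-01-12',
#                         '2016-01-11', '2016-01-13')   # True
# _periods_overlap_sketch('2016-01-10', '2016-01-12',
#                         '2016-01-12', '2016-01-14')   # False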
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A factorized retrieval task extended for distributionally-robust learning."""
import sys
from typing import List, Optional, Text, Union
import numpy as np
import tensorflow as tf
import tensorflow_recommenders as tfrs
class RobustRetrieval(tf.keras.layers.Layer):
"""A factorized retrieval task for distributionally-robust learning.
Recommender systems are often composed of two components:
- a retrieval model, retrieving O(thousands) candidates from a corpus of
O(millions) candidates.
- a ranker model, scoring the candidates retrieved by the retrieval model to
return a ranked shortlist of a few dozen candidates.
This task defines models that facilitate efficient retrieval of candidates
from large corpora by maintaining a two-tower, factorized structure: separate
query and candidate representation towers, joined at the top via a lightweight
scoring function.
We implemented a robust learning technique based on Distributionally-Robust
  Optimization (DRO), which aims to improve worst-case subgroup performance.
We further propose several improvements for the robust retrieval model.
- Streaming-loss DRO: keep streaming estimations of group loss.
- Metric-based DRO: replace loss-based re-weighting with metric-based
re-weighting.
- KL-regularization on DRO: penalize weight distributions that diverge from
empirical distributions.
Reference:
Sagawa S, Koh P W, et al. Distributionally robust neural networks for group
shifts: On the importance of regularization for worst-case generalization.
https://arxiv.org/abs/1911.08731.
"""
def __init__(self,
group_labels: Union[List[Text], List[int]],
group_loss_init: List[float],
group_metric_init: List[float],
group_weight_init: List[float],
group_reweight_strategy: Text,
loss: Optional[tf.keras.losses.Loss] = None,
metrics: Optional[tfrs.metrics.FactorizedTopK] = None,
topk: Optional[int] = 100,
candidates: Optional[tf.data.Dataset] = None,
temperature: Optional[float] = None,
num_hard_negatives: Optional[int] = None,
dro_temperature: Optional[float] = 0.1,
streaming_group_loss: Optional[bool] = False,
streaming_group_loss_lr: Optional[float] = 0.01,
streaming_group_metric_lr: Optional[float] = 0.01,
group_metric_with_decay: Optional[bool] = False,
metric_update_freq: Optional[int] = 1,
name: Optional[Text] = "robust_retrieval_task") -> None:
"""Initializes the task.
Args:
group_labels: A list of integers or strings as group identity labels. Used
to define subgroups for optimizing robust loss.
group_loss_init: A list of [num_groups] floats for group loss
initialization, e.g. [1.0, 2.0, 3.0].
group_metric_init: A list of [num_groups] floats for group metric
initialization, e.g. [0.0, 0.0, 0.0].
group_weight_init: A list of [num_groups] floats for group weight
initialization that add up to 1, e.g. [0.3, 0.2, 0.5].
      group_reweight_strategy: Group reweighting strategy. Shall be one of
        ["loss-dro", "metric-dro", "uniform"].
loss: Loss function. Defaults to
`tf.keras.losses.CategoricalCrossentropy`.
metrics: Object for evaluating top-K metrics over a corpus of candidates.
These metrics measure how good the model is at picking the true
candidate out of all possible candidates in the system. Note, because
the metrics range over the entire candidate set, they are usually much
        slower to compute. Consider setting `compute_metrics=False` during
        training to save the time spent computing the metrics.
topk: Number of top scoring candidates to retrieve for metric evaluation.
candidates: A set of candidate items.
temperature: Temperature of the softmax.
num_hard_negatives: If positive, the `num_hard_negatives` negative
examples with largest logits are kept when computing cross-entropy loss.
If larger than batch size or non-positive, all the negative examples are
kept.
dro_temperature: A float, temperature of the group re-weighting in DRO. A
suggested range is between [0.001,0.1].
streaming_group_loss: if `True` will use streaming loss estimations.
      streaming_group_loss_lr: a value in [0,1]; a larger value makes the
        group loss estimate focus more on the current batch.
      streaming_group_metric_lr: a value in [0,1]; a larger value makes the
        group metric estimate focus more on the current batch.
group_metric_with_decay: if `True` will use decay for group metric update.
      metric_update_freq: the group metric is updated once every n batches.
name: Optional task name.
"""
super().__init__(name=name)
# Robust training settings.
self._group_labels = group_labels
self._group_labels_matrix = tf.reshape(np.array(group_labels), [-1, 1])
self._num_groups = len(group_labels)
self._group_reweight_strategy = group_reweight_strategy
self._dro_temperature = dro_temperature
self._streaming_group_loss = streaming_group_loss
self._streaming_group_loss_lr = streaming_group_loss_lr
self._streaming_group_metric_lr = streaming_group_metric_lr
self._metric_update_freq = metric_update_freq
self._group_metric_with_decay = group_metric_with_decay
# Initialization of group weights.
self._group_weights = tf.Variable(
initial_value=tf.convert_to_tensor(group_weight_init, dtype=tf.float32),
trainable=False)
# Initialization of group loss.
self._group_loss = tf.Variable(
initial_value=tf.convert_to_tensor(group_loss_init, dtype=tf.float32),
trainable=False)
self._sample_loss = (
loss if loss is not None else tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE))
self._topk = topk
self._factorized_metrics = metrics
if isinstance(candidates, tf.data.Dataset):
candidates = tfrs.layers.factorized_top_k.Streaming(
k=self._topk).index_from_dataset(candidates)
# Initialization of group metric.
self._group_metric_estimates = tf.Variable(
initial_value=tf.convert_to_tensor(group_metric_init, dtype=tf.float32),
trainable=False)
self._group_metrics = []
for x in range(self._num_groups):
self._group_metrics.append(
tf.keras.metrics.TopKCategoricalAccuracy(
k=self._topk,
name=f"group_{self._group_labels[x]}_top{self._topk}_accuracy",
))
self._candidates = candidates
self._temperature = temperature
self._num_hard_negatives = num_hard_negatives
self._name = name
@property
def factorized_metrics(self) -> Optional[tfrs.metrics.FactorizedTopK]:
"""The metrics object used to compute retrieval metrics."""
return self._factorized_metrics
@factorized_metrics.setter
def factorized_metrics(self,
value: Optional[tfrs.metrics.FactorizedTopK]) -> None:
"""Sets factorized metrics."""
self._factorized_metrics = value
def call(self,
query_embeddings: tf.Tensor,
candidate_embeddings: tf.Tensor,
group_identity: tf.Tensor,
step_count: tf.Tensor,
sample_weight: Optional[tf.Tensor] = None,
candidate_sampling_probability: Optional[tf.Tensor] = None,
candidate_ids: Optional[tf.Tensor] = None,
compute_metrics: bool = True) -> tf.Tensor:
"""Computes the task loss and metrics.
    The main arguments are pairs of query and candidate embeddings: the first row
of query_embeddings denotes a query for which the candidate from the first
row of candidate embeddings was selected by the user.
The task will try to maximize the affinity of these query, candidate pairs
while minimizing the affinity between the query and candidates belonging
to other queries in the batch.
Args:
query_embeddings: [num_queries, embedding_dim] tensor of query
representations.
candidate_embeddings: [num_queries, embedding_dim] tensor of candidate
representations.
group_identity: [num_queries] tensor of query group identity.
step_count: Number of training steps.
sample_weight: [num_queries] tensor of sample weights.
candidate_sampling_probability: Optional tensor of candidate sampling
        probabilities. When given, it will be used to correct the logits to
        reflect the sampling probability of negative candidates.
candidate_ids: Optional tensor containing candidate ids. When given
enables removing accidental hits of examples used as negatives. An
        accidental hit is defined as a candidate that is used as an in-batch
        negative but has the same id as the positive candidate.
compute_metrics: If true, metrics will be computed. Because evaluating
metrics may be slow, consider disabling this in training.
Returns:
loss: Tensor of loss values.
"""
scores = tf.linalg.matmul(
query_embeddings, candidate_embeddings, transpose_b=True)
num_queries = tf.shape(scores)[0]
num_candidates = tf.shape(scores)[1]
labels = tf.eye(num_queries, num_candidates)
if candidate_sampling_probability is not None:
scores = tfrs.layers.loss.SamplingProbablityCorrection()(
scores, candidate_sampling_probability)
if candidate_ids is not None:
scores = tfrs.layers.loss.RemoveAccidentalHits()(labels, scores,
candidate_ids)
if self._num_hard_negatives is not None:
scores, labels = tfrs.layers.loss.HardNegativeMining(
self._num_hard_negatives)(scores, labels)
if self._temperature is not None:
scores = scores / self._temperature
sample_loss = self._sample_loss(y_true=labels, y_pred=scores)
# group_mask: [num_groups, num_queries], cur_group_loss: [num_groups]
cur_group_loss, group_mask = self._compute_group_loss(
sample_loss, group_identity)
# Set default DRO update ops.
group_loss_update = tf.no_op()
group_metric_update = tf.no_op()
group_weights_update = tf.no_op()
# Note: only update loss/metric estimations when subgroup exists in a batch.
# group_exist_in_batch: [num_groups], bool
group_exist_in_batch = tf.math.reduce_sum(group_mask, axis=1) > 1e-16
if self._streaming_group_loss:
# Perform streaming estimation of group loss.
stream_group_loss = (
1 - tf.cast(group_exist_in_batch, "float32") *
self._streaming_group_loss_lr
) * self._group_loss + self._streaming_group_loss_lr * cur_group_loss
group_loss_update = self._group_loss.assign(
stream_group_loss, read_value=False)
else:
group_loss_update = self._group_loss.assign(
cur_group_loss, read_value=False)
if self._group_reweight_strategy == "loss-dro":
# Perform loss-based group weight updates.
with tf.control_dependencies([group_loss_update]):
group_weights_update = self._update_group_weights(self._group_loss)
elif self._group_reweight_strategy == "metric-dro":
# Perform metric-based group weight updates.
# Note: only update when subgroup exists in a batch.
      # Only update the weights once every `_metric_update_freq` steps.
      # TODO(xinyang,tyao,jiaxit): change to sampled metric for efficiency.
if (step_count % self._metric_update_freq) == 0:
batch_group_metric_update = self._update_group_metrics(
query_embeddings, candidate_embeddings, group_mask)
with tf.control_dependencies([batch_group_metric_update]):
if self._group_metric_with_decay:
stream_group_metric_lr = tf.cast(
group_exist_in_batch,
"float32") * self._streaming_group_metric_lr
stream_group_metrics = (
1 - stream_group_metric_lr
            ) * self._group_metric_estimates + stream_group_metric_lr * self._get_group_metrics(
)
group_metric_update = self._group_metric_estimates.assign(
stream_group_metrics, read_value=False)
group_weights_update = self._update_group_weights(
1 - stream_group_metrics)
else:
group_weights_update = self._update_group_weights(
              1 - self._get_group_metrics())
update_ops = [group_loss_update, group_metric_update, group_weights_update]
if compute_metrics and (self._factorized_metrics is not None):
update_ops.append(
self._factorized_metrics.update_state(query_embeddings,
candidate_embeddings))
with tf.control_dependencies([tf.group(update_ops)]):
      # Add group logs for analysis and debugging.
self._add_group_logs(cur_group_loss, step_count)
return tf.reduce_sum(
tf.stop_gradient(self._group_weights) * cur_group_loss) * tf.cast(
num_queries, dtype="float32")
def _compute_group_loss(self, sample_loss, group_identity):
"""Calculate subgroup losses.
Args:
sample_loss: Tensor of [num_queries] representing loss for each query.
group_identity: Tensor of [num_queries] representing the group identity
for each query.
Returns:
group_loss: Tensor of group loss values.
group_mask: Tensor of [num_groups, num_queries].
"""
# Shape of group_mask: [num_groups, num_queries].
group_mask = tf.cast(
tf.equal(group_identity, self._group_labels_matrix), dtype="float32")
group_cnts = tf.reduce_sum(group_mask, axis=1)
# Avoid divide by zero.
group_cnts += tf.cast(group_cnts == 0, dtype="float32")
# group loss shape: [num_groups]
group_loss = tf.divide(
tf.reduce_sum(group_mask * sample_loss, axis=1), group_cnts)
return group_loss, group_mask
def _update_group_metrics(self, query_embeddings, true_candidate_embeddings,
group_mask):
"""Perform group metric updates."""
# [batch_size, 1]
positive_scores = tf.reduce_sum(
query_embeddings * true_candidate_embeddings, axis=1, keepdims=True)
# [batch_size, k]
top_k_predictions, _ = self._candidates(query_embeddings, k=self._topk)
y_true = tf.concat(
[tf.ones_like(positive_scores),
tf.zeros_like(top_k_predictions)],
axis=1)
y_pred = tf.concat([positive_scores, top_k_predictions], axis=1)
update_ops = []
for group_id, metric in enumerate(self._group_metrics):
if self._group_metric_with_decay:
# Reset states to get batch-wise metrics.
metric.reset_states()
update_ops.append(
metric.update_state(
y_true=y_true, y_pred=y_pred, sample_weight=group_mask[group_id]))
return tf.group(update_ops)
def _update_group_weights(self, group_hardness, read_value=False):
"""Compute subgroup weights.
Args:
group_hardness: Tensor of [num_groups] representing hardness for each
subgroup, for example, group loss or group metric.
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op. See
tf.Variable.assign.
Returns:
group_weights_assign_op: Op of group weights assignment.
"""
new_group_weights = tf.nn.softmax(
tf.math.log(self._group_weights) +
self._dro_temperature * group_hardness,
axis=-1)
group_weights_assign_op = self._group_weights.assign(
new_group_weights, read_value=read_value)
return group_weights_assign_op
def _get_group_metrics(self):
"""Return the latest subgroup metrics."""
return tf.convert_to_tensor(
[metric.result() for metric in self._group_metrics])
def _add_group_logs(self, cur_group_loss, step_count):
"""Add to group loss and weights to tensorboard."""
tf.print("step_count:", step_count, output_stream=sys.stdout)
tf.print("group loss:", cur_group_loss, output_stream=sys.stdout)
tf.print("group est. loss:", self._group_loss, output_stream=sys.stdout)
tf.print("group weights:", self._group_weights, output_stream=sys.stdout)
group_summary = {}
group_loss_summary = {
f"Batch_GroupLoss_{self._group_labels[i]}": cur_group_loss[i]
for i in range(self._num_groups)
}
group_loss_est_summary = {
f"Est_GroupLoss_{self._group_labels[i]}": self._group_loss[i]
for i in range(self._num_groups)
}
group_weight_summary = {
f"GroupWeights_{self._group_labels[i]}": self._group_weights[i]
for i in range(self._num_groups)
}
group_summary.update(group_loss_summary)
group_summary.update(group_loss_est_summary)
group_summary.update(group_weight_summary)
self._add_tf_scalar_summary(group_summary, step_count)
def _add_tf_histogram_summary(self, tensor_dict, step):
for key, tensor in tensor_dict.items():
if tensor is not None:
if self._name is not None:
tf.summary.histogram(f"{self._name}/{key}", tensor, step=step)
else:
tf.summary.histogram(key, tensor, step=step)
def _add_tf_scalar_summary(self, tensor_dict, step):
for key, tensor in tensor_dict.items():
if tensor is not None:
if self._name is not None:
tf.summary.scalar(f"{self._name}/{key}", tensor, step=step)
else:
tf.summary.scalar(key, tensor, step=step)
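# A minimal NumPy sketch (not used by the layer above) of the two core DRO
# computations in this file: the masked per-group loss average from
# `_compute_group_loss`, and the reweighting from `_update_group_weights`,
# where the new weights are softmax(log(w) + dro_temperature * group_hardness).
# The numbers below are made up for illustration.
def _dro_reweight_sketch():
  sample_loss = np.array([0.2, 1.5, 0.9, 0.4])  # per-query losses
  group_identity = np.array([0, 1, 1, 0])  # group of each query
  group_labels_matrix = np.array([[0], [1]])  # [num_groups, 1]
  group_mask = (group_identity == group_labels_matrix).astype(np.float32)
  group_cnts = np.maximum(group_mask.sum(axis=1), 1.0)
  group_loss = (group_mask * sample_loss).sum(axis=1) / group_cnts
  group_weights = np.array([0.5, 0.5])
  dro_temperature = 0.1
  logits = np.log(group_weights) + dro_temperature * group_loss
  new_weights = np.exp(logits) / np.exp(logits).sum()
  # The harder group (group 1, loss 1.2 vs 0.3) receives slightly more weight.
  return group_loss, new_weights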
|
|
import sys
import unittest
import numpy
from chainer import cuda
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check as T
class TestConstant(unittest.TestCase):
def setUp(self):
self.x = T.Constant(10)
def test_str(self):
self.assertEqual('10', str(self.x))
def test_eval(self):
self.assertEqual(10, self.x.eval())
class TestVariable(unittest.TestCase):
def setUp(self):
self.x = T.Variable(10, 'x')
def test_str(self):
self.assertEqual('x', str(self.x))
def test_eval(self):
self.assertEqual(10, self.x.eval())
class Object(object):
def __init__(self):
self.value = 10
class TestGetAttr(unittest.TestCase):
def setUp(self):
x = Object()
self.value = T.GetAttr(T.Variable(x, 'x'), 'value')
self.value2 = T.GetAttr(T.Variable(x, 'x'), T.Constant('value'))
self.value3 = T.GetAttr(T.Variable(x, 'x'), 3)
def test_str(self):
self.assertEqual('x.value', str(self.value))
self.assertEqual('x.value', str(self.value2))
self.assertEqual('getattr(x, 3)', str(self.value3))
def test_eval(self):
self.assertEqual(10, self.value.eval())
class TestGetItem(unittest.TestCase):
def setUp(self):
x = T.Variable([1, 2, 3], 'x')
y = T.Variable({'a': 1, 'b': 2}, 'y')
self.x = x
self.v1 = T.GetItem(x, 1)
self.v2 = T.GetItem(y, 'a')
def test_str(self):
self.assertEqual('x[1]', str(self.v1))
self.assertEqual("y['a']", str(self.v2))
x = self.x
self.assertEqual('x[:]', str(x[:]))
self.assertEqual('x[:]', str(x[::]))
self.assertEqual('x[1:]', str(x[1:]))
self.assertEqual('x[:2]', str(x[:2]))
self.assertEqual('x[1:2]', str(x[1:2]))
self.assertEqual('x[1::1]', str(x[1::1]))
self.assertEqual('x[:2:1]', str(x[:2:1]))
self.assertEqual('x[1:2:1]', str(x[1:2:1]))
self.assertEqual('x[...]', str(x[...]))
self.assertEqual('x[0, 1]', str(x[0, 1]))
self.assertEqual('x[1:2, ...]', str(x[1:2:, ...]))
def test_eval(self):
self.assertEqual(2, self.v1.eval())
self.assertEqual(1, self.v2.eval())
class TestCall(unittest.TestCase):
def setUp(self):
f = T.Variable(sum, 'sum')
self.c1 = T.Call(f, ([1, 2, 3],))
self.c2 = f([1, 2, 3])
self.c3 = T.Call(f, (['', 1],))
def test_str(self):
self.assertEqual('sum([1, 2, 3])', str(self.c1))
self.assertEqual('sum([1, 2, 3])', str(self.c2))
self.assertEqual('sum([\'\', 1])', str(self.c3))
def test_eval(self):
self.assertEqual(6, self.c1.eval())
self.assertEqual(6, self.c2.eval())
# an error occurs in `eval`
with self.assertRaises(TypeError):
self.assertEqual(6, self.c3.eval())
class TestBinaryOperator(unittest.TestCase):
def setUp(self):
x = T.Variable(1, 'x')
y = T.Variable(1, 'y')
def f(x, y):
return x, y
self.op1 = T.BinaryOperator(7, x, y, '+', f)
self.op2 = T.BinaryOperator(8, x, y, '+', f)
self.op3 = T.BinaryOperator(9, x, y, '+', f)
self.op4 = T.BinaryOperator(7, x, y, '+', f, True)
self.op5 = T.BinaryOperator(8, x, y, '+', f, True)
self.op6 = T.BinaryOperator(9, x, y, '+', f, True)
def test_str(self):
self.assertEqual('x + y', str(self.op1))
self.assertEqual('x + (y)', str(self.op2))
self.assertEqual('(x) + (y)', str(self.op3))
self.assertEqual('x + y', str(self.op4))
self.assertEqual('(x) + y', str(self.op5))
self.assertEqual('(x) + (y)', str(self.op6))
def test_eval(self):
self.assertEqual((1, 1), self.op1.eval())
class TestUnaryOperator(unittest.TestCase):
def setUp(self):
x = T.Variable(1, 'x')
def f(x):
return x,
self.op1 = T.UnaryOperator(8, x, '-', f)
self.op2 = T.UnaryOperator(9, x, '-', f)
def test_str(self):
self.assertEqual('-x', str(self.op1))
self.assertEqual('-(x)', str(self.op2))
def test_eval(self):
self.assertEqual((1, ), self.op1.eval())
class TestOperators(unittest.TestCase):
def setUp(self):
self.x = T.Variable(1, 'x')
self.y = T.Variable(1, 'y')
def test_str(self):
x = self.x
y = self.y
self.assertEqual('x + y', str(x + y))
self.assertEqual('1 + x', str(1 + x))
self.assertEqual('x - y', str(x - y))
self.assertEqual('1 - x', str(1 - x))
self.assertEqual('x * y', str(x * y))
self.assertEqual('1 * x', str(1 * x))
self.assertEqual('x / y', str(x / y))
self.assertEqual('1 / x', str(1 / x))
self.assertEqual('x // y', str(x // y))
self.assertEqual('1 // x', str(1 // x))
self.assertEqual('x % y', str(x % y))
self.assertEqual('1 % x', str(1 % x))
self.assertEqual('x ** y', str(x ** y))
self.assertEqual('x ** y', str(pow(x, y)))
self.assertEqual('x << y', str(x << y))
self.assertEqual('1 << x', str(1 << x))
self.assertEqual('x >> y', str(x >> y))
self.assertEqual('1 >> x', str(1 >> x))
self.assertEqual('x & y', str(x & y))
self.assertEqual('1 & x', str(1 & x))
self.assertEqual('x ^ y', str(x ^ y))
self.assertEqual('1 ^ x', str(1 ^ x))
self.assertEqual('x | y', str(x | y))
self.assertEqual('1 | x', str(1 | x))
self.assertEqual('-x', str(-x))
self.assertEqual('+x', str(+x))
self.assertEqual('~x', str(~x))
# left-associative
self.assertEqual('x + x - x', str(x + x - x))
self.assertEqual('x + (x - x)', str(x + (x - x)))
self.assertEqual('x << (x << x)', str(x << (x << x)))
# right-associative
self.assertEqual('x ** x ** x', str(x ** x ** x))
self.assertEqual('x ** x ** x', str(x ** (x ** x)))
self.assertEqual('(x ** x) ** x', str((x ** x) ** x))
self.assertEqual('-(x + x)', str(-(x + x)))
# pow has higher priority than unary operators
self.assertEqual('-x ** x', str(-x ** x))
self.assertEqual('(-x) ** x', str((-x) ** x))
def test_priority(self):
x = self.x
y = self.y
self.assertTrue((x << y).priority == (x >> y).priority)
self.assertTrue((x + y).priority == (x - y).priority)
self.assertTrue((x * y).priority ==
(x / y).priority ==
(x // y).priority ==
(x % y).priority)
self.assertTrue((-x).priority == (+x).priority == (~x).priority)
self.assertTrue((x | y).priority <
(x ^ y).priority <
(x & y).priority <
(x << y).priority <
(x + y).priority <
(x * y).priority <
(-x).priority <
(x ** y).priority <
x.priority)
class TestDivOperator(unittest.TestCase):
def setUp(self):
self.x = T.Variable(1, 'x')
self.y = T.Variable(2, 'y')
def test_div(self):
# Behavior of '/' operator for int depends on the version of Python
if sys.version_info < (3, 0, 0):
self.assertEqual(0, (self.x / self.y).eval())
else:
self.assertEqual(0.5, (self.x / self.y).eval())
class TestGetType(unittest.TestCase):
def test_empty(self):
ts = T.get_types((), 'name', False)
self.assertIsInstance(ts, T.TypeInfoTuple)
self.assertEqual(0, len(ts))
self.assertEqual('name', ts.name)
def test_simple(self):
data = (numpy.zeros((1, 2, 3)).astype(numpy.float32),)
ts = T.get_types(data, 'name', False)
self.assertIsInstance(ts, T.TypeInfoTuple)
self.assertEqual(1, len(ts))
self.assertEqual('name', ts.name)
t = ts[0]
self.assertIsInstance(t, T.Expr)
self.assertEqual(1, t.shape[0].eval())
self.assertEqual(2, t.shape[1].eval())
self.assertEqual(3, t.shape[2].eval())
self.assertEqual(3, t.ndim.eval())
self.assertEqual(numpy.float32, t.dtype.eval())
def test_invalid_arg(self):
with self.assertRaises(AssertionError):
T.get_types(1, 'name', False)
class TestBoolBinaryOperator(unittest.TestCase):
def setUp(self):
x = T.Variable(1, 'x')
y = T.Variable(1, 'y')
z = T.Variable(2, 'z')
def f(x, y):
return x == y
self.op1 = T.BoolBinaryOperator(x, y, '==', '!=', f)
self.op2 = T.BoolBinaryOperator(x, z, '==', '!=', f)
def test_eval(self):
self.assertTrue(self.op1.eval())
def test_expect(self):
with self.assertRaises(T.InvalidType):
self.op2.expect()
def test_bool(self):
with self.assertRaises(RuntimeError):
bool(self.op1)
def test_bool_operator(self):
with self.assertRaises(RuntimeError):
not self.op1
class TestLazyGetItem(unittest.TestCase):
def setUp(self):
self.t = T.Constant(0)
def test_evaluate_size(self):
# __getitem__, __getattr__ and __call__ only build syntax trees;
# they are not evaluated yet
self.assertIsInstance(self.t[1], T.Expr)
self.assertIsInstance(self.t.x, T.Expr)
self.assertIsInstance(self.t(), T.Expr)
# an error is raised at evaluation time
with self.assertRaises(TypeError):
self.t[1].eval()
with self.assertRaises(AttributeError):
self.t.x.eval()
with self.assertRaises(TypeError):
self.t().eval()
class TestListItem(unittest.TestCase):
def test_eval_list_items(self):
self.assertTrue((T.Constant([0]) == [T.Constant(0)]).eval())
def test_list_str(self):
self.assertEqual('[0]', T._repr([T.Constant(0)]))
def test_eval_tuple_items(self):
self.assertTrue((T.Constant((0,)) == (T.Constant(0),)).eval())
def test_tuple_str(self):
self.assertEqual('()', T._repr(()))
self.assertEqual('(0,)', T._repr((T.Constant(0),)))
self.assertEqual('(0, 0)', T._repr((T.Constant(0), T.Constant(0))))
def test_eval_nest_list(self):
self.assertTrue((T.Constant([[0]]) == [[T.Constant(0)]]).eval())
def test_nest_list_str(self):
self.assertEqual('[[0]]', T._repr([[T.Constant(0)]]))
class TestProd(unittest.TestCase):
def test_name(self):
p = T.prod([])
self.assertEqual(str(p), 'prod([])')
def test_value(self):
value = T.prod([2, 3]).eval()
self.assertEqual(value, 6)
class TestSameTypes(unittest.TestCase):
def test_all_numpy_array(self):
x = numpy.array([0])
y = numpy.array([1])
z = numpy.array([2])
self.assertTrue(T.same_types(x, y, z))
def test_all_numpy_subclasses(self):
x = numpy.array([0])
y = numpy.array([[1], [2]])
z = numpy.matrix("3,4; 5,6")
self.assertTrue(T.same_types(x, y, z))
@attr.gpu
def test_all_cupy_array(self):
x = cuda.cupy.array([0])
y = cuda.cupy.array([1])
z = cuda.cupy.array([2])
self.assertTrue(T.same_types(x, y, z))
@attr.gpu
def test_numpy_cupy_mixed_1(self):
x = numpy.array([0])
y = cuda.cupy.array([1])
z = numpy.array([2])
self.assertFalse(T.same_types(x, y, z))
@attr.gpu
def test_numpy_cupy_mixed_2(self):
x = cuda.cupy.array([0])
y = numpy.array([1])
z = cuda.cupy.array([2])
self.assertFalse(T.same_types(x, y, z))
testing.run_module(__name__, __file__)
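# --- Hedged usage note, not part of the original tests ---
# In library code the expressions exercised above are normally built inside a
# Function's check_type_forward(); a minimal sketch assuming a single 2-D float32
# input would be:
#
#   def check_type_forward(self, in_types):
#       T.expect(in_types.size() == 1)
#       x_type, = in_types
#       T.expect(
#           x_type.dtype == numpy.float32,
#           x_type.ndim == 2,
#       )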
|
|
# -*- coding: utf-8 -*-
import pytest
import os
from requester.parser import (
ResponseParser, RequestParser, RequestFileParser
)
class Teardown(object):
def remove_temp_file(self, temporary_request_file):
try:
os.remove(temporary_request_file)
except OSError as e:
print("Error: %s is not removed.\n%s" % (temporary_request_file, e))
class TestResponseParserWithoutStream(object):
@pytest.fixture(autouse=True)
def setup(self):
self.response_parser = ResponseParser("")
def test_run(self):
self.response_parser.run()
assert getattr(self.response_parser, "start_line") == []
assert getattr(self.response_parser, "headers") == {}
assert getattr(self.response_parser, "body") == ""
assert getattr(self.response_parser, "stream") == ""
class TestResponseParserWithValidStream(object):
valid_response_stream = (b"HTTP/1.1 200 OK\r\n"
b"Date: Tue, 22 Aug 2017 07:57:39 GMT\r\n"
b"Server: Apache/2.2.22 (Debian)\r\n"
b"Vary: Accept-Encoding\r\n"
b"Content-Length: 54\r\n"
b"Content-Type: text/html; charset=UTF-8\r\n"
b"\r\n"
b"<html><head></head><body>Hello World~!!!</body></html>")
@pytest.fixture(autouse=True)
def setup(self):
self.response_parser = ResponseParser(self.valid_response_stream)
def test_run(self):
self.response_parser.run()
start_line = ["HTTP/1.1", "200", "OK"]
headers = {
"date" : "Tue, 22 Aug 2017 07:57:39 GMT",
"server" : "Apache/2.2.22 (Debian)",
"vary" : "Accept-Encoding",
"content-length" : "54",
"content-type" : "text/html; charset=UTF-8"
}
body = "<html><head></head><body>Hello World~!!!</body></html>"
assert getattr(self.response_parser, "start_line") == start_line
assert getattr(self.response_parser, "headers") == headers
assert getattr(self.response_parser, "body") == body
assert getattr(self.response_parser, "stream") == self.valid_response_stream
class TestResponseParserWithInValidStream(object):
invalid_response_stream = (b"HTTP/1.1 404 Not Found\r\n"
b"Date: Tue, 22 Aug 2017 07:57:39 GMT\r\n"
b"Server: Apache/2.2.22 (Debian)\r\n"
b"Vary: Accept-Encoding\r\n"
b"Content-Type: text/html; charset=UTF-8"
b"<html><head></head><body>Hello World~!!!</body></html>")
@pytest.fixture(autouse=True)
def setup(self):
self.response_parser = ResponseParser(self.invalid_response_stream)
def test_run(self):
self.response_parser.run()
start_line = ["HTTP/1.1", "404", "NOT"]
headers = {
"date" : "Tue, 22 Aug 2017 09:37:39 GMT",
"Server" : "Apache/2.2.22 (Debian)",
"Vary" : "Accept-Encoding",
"content-length" : "54",
"Content-Type" : "text/html; charset=UTF-8"
}
body = "<html><head></head><body>Hello World~!!!\r\n</body></html>"
assert getattr(self.response_parser, "start_line") != start_line
assert getattr(self.response_parser, "headers") != headers
assert getattr(self.response_parser, "body") != body
assert getattr(self.response_parser, "stream") == self.invalid_response_stream
class TestRequestParserWithoutRequest(object):
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser()
def test_run(self):
self.request_parser.run()
assert getattr(self.request_parser, "start_line") == []
assert getattr(self.request_parser, "headers") == {}
assert getattr(self.request_parser, "method") == ""
assert getattr(self.request_parser, "uri") == ""
assert getattr(self.request_parser, "version") == ""
assert getattr(self.request_parser, "body") == ""
assert getattr(self.request_parser, "stream") == ""
class TestRequestParserWithValidGetRequestAndNoBody(object):
valid_request_stream = (b"GET / HTTP/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.valid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["GET", "/", "HTTP/1.1"]
headers = {
"host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "GET"
uri = "/"
version = "HTTP/1.1"
body = ""
assert getattr(self.request_parser, "start_line") == start_line
assert getattr(self.request_parser, "headers") == headers
assert getattr(self.request_parser, "method") == method
assert getattr(self.request_parser, "uri") == uri
assert getattr(self.request_parser, "version") == version
assert getattr(self.request_parser, "body") == body
assert getattr(self.request_parser, "stream") == self.valid_request_stream
class TestRequestParserWithValidGetRequestHttp1_0(object):
valid_request_stream = (b"GET / HTTP/1.0\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.valid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["GET", "/", "HTTP/1.0"]
headers = {
"host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "GET"
uri = "/"
version = "HTTP/1.0"
body = ""
assert getattr(self.request_parser, "start_line") == start_line
assert getattr(self.request_parser, "headers") == headers
assert getattr(self.request_parser, "method") == method
assert getattr(self.request_parser, "uri") == uri
assert getattr(self.request_parser, "version") == version
assert getattr(self.request_parser, "body") == body
assert getattr(self.request_parser, "stream") == self.valid_request_stream
class TestRequestParserWithValidPostRequestAndNoBody(object):
valid_request_stream = (b"POST /login HTTP/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.valid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["POST", "/login", "HTTP/1.1"]
headers = {
"host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "POST"
uri = "/login"
version = "HTTP/1.1"
body = ""
assert getattr(self.request_parser, "start_line") == start_line
assert getattr(self.request_parser, "headers") == headers
assert getattr(self.request_parser, "method") == method
assert getattr(self.request_parser, "uri") == uri
assert getattr(self.request_parser, "version") == version
assert getattr(self.request_parser, "body") == body
assert getattr(self.request_parser, "stream") == self.valid_request_stream
class TestRequestParserWithValidPostRequestAndBody(object):
valid_request_stream = (b"POST /login HTTP/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Content-Lengh: 10\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
b"\r\n"
b"2d2d2d2d2d2d5765624b69")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.valid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["POST", "/login", "HTTP/1.1"]
headers = {
"host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"content-lengh" : "10",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "POST"
uri = "/login"
version = "HTTP/1.1"
body = "2d2d2d2d2d2d5765624b69".decode("hex")
assert getattr(self.request_parser, "start_line") == start_line
assert getattr(self.request_parser, "headers") == headers
assert getattr(self.request_parser, "method") == method
assert getattr(self.request_parser, "uri") == uri
assert getattr(self.request_parser, "version") == version
assert getattr(self.request_parser, "body") == body
assert getattr(self.request_parser, "stream") == self.valid_request_stream
class TestRequestParserWithInValidGetRequestAndNoBody(object):
invalid_request_stream = (b"GET / HTTP/1.0 \r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.invalid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["GET", "/", "HTTP/1.0"]
headers = {
"Host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"upgrade-insecure-requests" : "10",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "get"
uri = "/ "
version = "HTTP/1.0"
body = ""
assert getattr(self.request_parser, "start_line") != start_line
assert getattr(self.request_parser, "headers") != headers
assert getattr(self.request_parser, "method") != method
assert getattr(self.request_parser, "uri") != uri
assert getattr(self.request_parser, "version") != version
assert getattr(self.request_parser, "body") == body
assert getattr(self.request_parser, "stream") == self.invalid_request_stream
class TestRequestParserWithInValidPostRequestAndNoBody(object):
invalid_request_stream = (b"post /login http/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
b"\r\n")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.invalid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["POST", " /login", "HTTP/1.1"]
headers = {
"host" : "8.8.8.4",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "POST"
uri = "/login "
version = "HTTP/1.1 "
body = ""
assert getattr(self.request_parser, "start_line") != start_line
assert getattr(self.request_parser, "headers") != headers
assert getattr(self.request_parser, "method") != method
assert getattr(self.request_parser, "uri") != uri
assert getattr(self.request_parser, "version") != version
assert getattr(self.request_parser, "body") == body
assert getattr(self.request_parser, "stream") == self.invalid_request_stream
class TestRequestParserWithInValidPostRequestAndInValidBody(object):
invalid_request_stream = (b"POST /login HTTP/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Content-Lengh: 10\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
b"2d2d2d2d2d2d5765624b69")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.invalid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["post", "/login ", "http/1.1"]
headers = {
"Host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"content-lengh" : "10",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "post"
uri = "/"
version = "HTTP/1.1 "
body = "2d2d2d2d2d2d5765624b69".decode("hex")
assert getattr(self.request_parser, "start_line") != start_line
assert getattr(self.request_parser, "headers") != headers
assert getattr(self.request_parser, "method") != method
assert getattr(self.request_parser, "uri") != uri
assert getattr(self.request_parser, "version") != version
assert getattr(self.request_parser, "body") != body
assert getattr(self.request_parser, "stream") == self.invalid_request_stream
class TestRequestParserWithInValidPostRequestAndValidBody(object):
invalid_request_stream = (b"POST /login HTTP/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Content-Lengh: 10\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
b"\r\n"
b"------WebKit")
@pytest.fixture(autouse=True)
def setup(self):
self.request_parser = RequestParser(self.invalid_request_stream)
def test_run(self):
self.request_parser.run()
start_line = ["post", "/login ", "http/1.1"]
headers = {
"Host" : "8.8.8.9",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"content-lengh" : "10",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "post"
uri = "/"
version = "HTTP/1.1 "
body = "2d2d2d2d2d2d5765624b69".decode("hex")
assert getattr(self.request_parser, "start_line") != start_line
assert getattr(self.request_parser, "headers") != headers
assert getattr(self.request_parser, "method") != method
assert getattr(self.request_parser, "uri") != uri
assert getattr(self.request_parser, "version") != version
assert getattr(self.request_parser, "body") != body
assert getattr(self.request_parser, "stream") == self.invalid_request_stream
class TestRequestFileParserWithoutFileName(object):
@pytest.fixture(autouse=True)
def setup(self):
self.request_file_parser = RequestFileParser("")
def test_run(self):
self.request_file_parser.run()
assert getattr(self.request_file_parser, "start_line") == []
assert getattr(self.request_file_parser, "headers") == {}
assert getattr(self.request_file_parser, "method") == ""
assert getattr(self.request_file_parser, "uri") == ""
assert getattr(self.request_file_parser, "version") == ""
assert getattr(self.request_file_parser, "body") == ""
assert getattr(self.request_file_parser, "stream") == ""
assert getattr(self.request_file_parser, "file_name") == ""
class TestRequestFileParserWithFileNameAndValidStreamAndNoBody(Teardown):
temporary_request_file = "temporary_request_file"
valid_request_stream = (b"GET / HTTP/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n")
@pytest.fixture(autouse=True)
def setup(self):
with open(self.temporary_request_file, "w") as f:
f.write(self.valid_request_stream)
self.request_file_parser = RequestFileParser(self.temporary_request_file)
def test_run(self):
self.request_file_parser.run()
start_line = ["GET", "/", "HTTP/1.1"]
headers = {
"host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "GET"
uri = "/"
version = "HTTP/1.1"
body = ""
assert getattr(self.request_file_parser, "start_line") == start_line
assert getattr(self.request_file_parser, "headers") == headers
assert getattr(self.request_file_parser, "method") == method
assert getattr(self.request_file_parser, "uri") == uri
assert getattr(self.request_file_parser, "version") == version
assert getattr(self.request_file_parser, "body") == body
assert getattr(self.request_file_parser, "stream") == ""
self.remove_temp_file(self.temporary_request_file)
class TestRequestFileParserWithFileNameAndValidStreamAndBody(Teardown):
temporary_request_file = "temporary_request_file"
valid_request_stream = (b"POST /login HTTP/1.1\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Content-Lengh: 10\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
b"\r\n"
b"2d2d2d2d2d2d5765624b69")
@pytest.fixture(autouse=True)
def setup(self):
with open(self.temporary_request_file, "w") as f:
f.write(self.valid_request_stream)
self.request_file_parser = RequestFileParser(self.temporary_request_file)
def test_run(self):
self.request_file_parser.run()
start_line = ["POST", "/login", "HTTP/1.1"]
headers = {
"host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"content-lengh" : "10",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "POST"
uri = "/login"
version = "HTTP/1.1"
body = "2d2d2d2d2d2d5765624b69".decode("hex")
assert getattr(self.request_file_parser, "start_line") == start_line
assert getattr(self.request_file_parser, "headers") == headers
assert getattr(self.request_file_parser, "method") == method
assert getattr(self.request_file_parser, "uri") == uri
assert getattr(self.request_file_parser, "version") == version
assert getattr(self.request_file_parser, "body") == body
assert getattr(self.request_file_parser, "stream") == ""
self.remove_temp_file(self.temporary_request_file)
class TestRequestFileParserWithFileNameAndInValidStreamAndBody(Teardown):
temporary_request_file = "temporary_request_file"
valid_request_stream = (b"post / HTTP/1.0\r\n"
b"Host: 8.8.8.8\r\n"
b"Connection: keep-alive\r\n"
b"Cache-Control: max-age=0\r\n"
b"Upgrade-Insecure-Requests: 1\r\n"
b"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\r\n"
b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n"
b"Accept-Encoding: gzip, deflate\r\n"
b"Accept-Language: ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
b"\r\n"
b"------WebKit")
@pytest.fixture(autouse=True)
def setup(self):
with open(self.temporary_request_file, "w") as f:
f.write(self.invalid_request_stream)
self.request_file_parser = RequestFileParser(self.temporary_request_file)
def test_run(self):
self.request_file_parser.run()
start_line = ["POST", "/login", "HTTP/1.1"]
headers = {
"host" : "8.8.8.8",
"connection" : "keep-alive",
"cache-control" : "max-age=0",
"content-lengh" : "10",
"upgrade-insecure-requests" : "1",
"user-agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding" : "gzip, deflate",
"accept-language" : "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4"
}
method = "POST"
uri = "/login"
version = "HTTP/1.1"
body = "2d2d2d2d2d2d5765624b69".decode("hex")
assert getattr(self.request_file_parser, "start_line") != start_line
assert getattr(self.request_file_parser, "headers") != headers
assert getattr(self.request_file_parser, "method") != method
assert getattr(self.request_file_parser, "uri") != uri
assert getattr(self.request_file_parser, "version") != version
assert getattr(self.request_file_parser, "body") != body
assert getattr(self.request_file_parser, "stream") == ""
self.remove_temp_file(self.temporary_request_file)
|
|
# -*- coding: utf-8 -*-
"""
unit test for mds
"""
import ConfigParser as configparser
import logging
import eventlet
from nose.tools import eq_
from simplecfs.mds.server import MDSServer
from simplecfs.common.parameters import * # NOQA
from simplecfs.message.packet import MakeDirPacket, RemoveDirPacket,\
ListDirPacket, StatusDirPacket, ValidDirPacket, AddDSPacket,\
ReportDSPacket, AddFilePacket, AddFileCommitPacket, StatFilePacket,\
DeleteFilePacket, GetFilePacket, GetObjPacket, GetChkPacket,\
RepairChkPacket, RepairChkCommitPacket
from simplecfs.message.network_handler import send_command, recv_command
MDS_CONFIG_FILE = './conf/mds.cfg'
CLIENT_CONFIG_FILE = './conf/client.cfg'
POOL = eventlet.GreenPool(10)
def start_mds(mds):
print 'start mds server'
mds.start()
def get_new_connection():
# get config options
config = configparser.ConfigParser()
config.read(CLIENT_CONFIG_FILE)
mds_ip = config.get('mds', 'mds_ip')
mds_port = config.getint('mds', 'mds_port')
print 'connect to server'
return eventlet.connect((mds_ip, mds_port))
def make_dir(dirname='/testdir/'):
"""test function: make_dir(dirname)
dirname should be absolute path and end with '/'
"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
print 'make dirname %s' % dirname
packet = MakeDirPacket(dirname)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def remove_dir(dirname='/testdir/'):
"""test function: remove_dir(dirname)
dirname should be absolute path and end with '/'
"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
print 'remove dirname %s' % dirname
packet = RemoveDirPacket(dirname)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def list_dir(dirname='/'):
"""test function: list_dir(dirname)
dirname should be absolute path and end with '/'
"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
print 'list dirname %s' % dirname
packet = ListDirPacket(dirname)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def status_dir(dirname='/testdir/'):
"""test function: status_dir(dirname)
dirname should be absolute path and end with '/'
"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
print 'status dirname %s' % dirname
packet = StatusDirPacket(dirname)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def valid_dir(dirname='/testdir/'):
"""test function: valid_dir(dirname)
dirname should be absolute path and end with '/'
"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
print 'valid dirname %s' % dirname
packet = ValidDirPacket(dirname)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7000):
"""test function: add_ds(rack_id, ds_ip, ds_port)
"""
print 'add ds, rack_id:%d ip:%s port:%d' % (rack_id, ds_ip, ds_port)
packet = AddDSPacket(rack_id, ds_ip, ds_port)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def report_ds(ds_ip='127.0.0.1', ds_port=7000, status=DS_CONNECTED):
"""test function: report_ds(info)
report ds state info to mds
"""
info = {
'space': 102400,
'chunk_num': 898,
'status': status,
}
packet = ReportDSPacket(ds_ip, ds_port, info)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def add_file(filename='/testfile', fileinfo=None):
"""
test function: add_file(filename, fileinfo)
filename should be absolute path,
fileinfo contains all the info in dict format:
fileinfo = {
"filename": filename,
"filesize": 1048576,
"block_size": 512,
"code": {
"type": "rs", # "rs/crs/zcode/etc.",
"k": 2,
"m": 2,
"w": 8,
},
}
"""
fileinfo = {
"filesize": 20480,
"code": {
"type": CODE_RS, # "rs/crs/zcode/etc.",
"k": 2,
"m": 2,
"w": 8,
"packet_size": 512,
"block_size": 1024,
},
}
print 'add file %s' % filename
print 'file info:'
print fileinfo
packet = AddFilePacket(filename, fileinfo)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def add_file_commit(filename='/testfile'):
"""
test function: add_file_commit(filename)
filename should be absolute path,
"""
print 'add file commit %s' % filename
packet = AddFileCommitPacket(filename)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def stat_file(filename='/testfile'):
"""
test function: stat_file(filename)
filename should be absolute path,
"""
print 'stat file %s' % filename
packet = StatFilePacket(filename)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def delete_file(filename='/testfile'):
"""
test function: delete_file(filename)
filename should be absolute path,
"""
print 'delete file %s' % filename
packet = DeleteFilePacket(filename)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def get_file(filepath='/testfile'):
"""
test function: get_file(filepath)
filepath should be absolute path,
"""
print 'get file %s' % filepath
packet = GetFilePacket(filepath)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def get_obj(obj_id='/testfile_obj0'):
"""
test function: get_obj(obj_id)
"""
print 'get obj %s' % obj_id
packet = GetObjPacket(obj_id)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def get_chk(chk_id='/testfile_obj0_chk0'):
"""
test function: get_chk(chk_id)
"""
print 'get chk %s' % chk_id
packet = GetChkPacket(chk_id)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def repair_chk(chk_id='/testfile_obj0_chk0'):
"""
test function: repair_chk(chk_id)
"""
print 'repair chk %s' % chk_id
packet = RepairChkPacket(chk_id)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
def repair_chk_commit(chk_id='/testfile_obj0_chk0',
ds_id='127.0.0.1:7000'):
"""
test function: repair_chk_commit(chk_id, ds_id)
"""
print 'repair chk commit %s %s' % (chk_id, ds_id)
packet = RepairChkCommitPacket(chk_id, ds_id)
msg = packet.get_message()
sock = get_new_connection()
sock_fd = sock.makefile('rw')
logging.info('%s', msg)
send_command(sock_fd, msg)
recv = recv_command(sock_fd)
print recv
logging.info('recv: %s', recv)
sock_fd.close()
return recv
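# A possible refactoring sketch (not used by the tests below): every helper above
# repeats the same connect/send/receive boilerplate, which could be factored out as:
def send_packet(packet):
    """Send a packet to the MDS and return the decoded reply."""
    msg = packet.get_message()
    sock = get_new_connection()
    sock_fd = sock.makefile('rw')
    logging.info('%s', msg)
    send_command(sock_fd, msg)
    recv = recv_command(sock_fd)
    logging.info('recv: %s', recv)
    sock_fd.close()
    return recv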
def test_mds():
# start the mds
config = configparser.ConfigParser()
config.read(MDS_CONFIG_FILE)
# start server
mds = MDSServer(config, test=True)
POOL.spawn_n(start_mds, mds)
# start test mds
dirname = '/testdir/'
ret = make_dir(dirname)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_MAKE_DIR_REPLY)
ret = list_dir('/')
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_LIST_DIR_REPLY)
eq_(True, dirname in ret['info'])
ret = list_dir(dirname)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_LIST_DIR_REPLY)
eq_([], ret['info'])
ret = status_dir(dirname)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_STATUS_DIR_REPLY)
ret = status_dir('/nosuchdir/')
eq_(ret['state'], RET_FAILURE)
eq_(ret['method'], OP_STATUS_DIR_REPLY)
ret = valid_dir(dirname)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_VALID_DIR_REPLY)
ret = valid_dir('/nosuchdir/')
eq_(ret['state'], RET_FAILURE)
eq_(ret['method'], OP_VALID_DIR_REPLY)
ret = remove_dir('/nosuchdir/')
eq_(ret['state'], RET_FAILURE)
eq_(ret['method'], OP_REMOVE_DIR_REPLY)
ret = remove_dir(dirname)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_REMOVE_DIR_REPLY)
ret = list_dir('/')
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_LIST_DIR_REPLY)
eq_(False, dirname in ret['info'])
ret = status_dir(dirname)
eq_(ret['state'], RET_FAILURE)
eq_(ret['method'], OP_STATUS_DIR_REPLY)
ret = valid_dir(dirname)
eq_(ret['state'], RET_FAILURE)
eq_(ret['method'], OP_VALID_DIR_REPLY)
ret = add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7000)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_DS_REPLY)
ret = report_ds()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_REPORT_DS_REPLY)
ret = add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7001)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_DS_REPLY)
ret = report_ds(ds_ip='127.0.0.1', ds_port=7001, status=DS_BROKEN)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_REPORT_DS_REPLY)
ret = add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7002)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_DS_REPLY)
ret = add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7003)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_DS_REPLY)
ret = add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7004)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_DS_REPLY)
ret = add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7005)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_DS_REPLY)
ret = add_ds(rack_id=0, ds_ip='127.0.0.1', ds_port=7006)
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_DS_REPLY)
ret = add_file()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_FILE_REPLY)
ret = add_file_commit()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_FILE_COMMIT_REPLY)
ret = stat_file()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_STAT_FILE_REPLY)
ret = delete_file()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_DELETE_FILE_REPLY)
ret = add_file()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_FILE_REPLY)
ret = add_file_commit()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_ADD_FILE_COMMIT_REPLY)
ret = get_file()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_GET_FILE_REPLY)
ret = get_obj()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_GET_OBJ_REPLY)
ret = get_chk()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_GET_CHK_REPLY)
ret = repair_chk()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_REPAIR_CHK_REPLY)
ret = repair_chk_commit()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_REPAIR_CHK_COMMIT_REPLY)
ret = delete_file()
eq_(ret['state'], RET_SUCCESS)
eq_(ret['method'], OP_DELETE_FILE_REPLY)
|
|
"""Support for the (unofficial) Tado API."""
import asyncio
from datetime import timedelta
import logging
from PyTado.interface import Tado
from requests import RequestException
import requests.exceptions
import voluptuous as vol
from homeassistant.components.climate.const import PRESET_AWAY, PRESET_HOME
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import Throttle
from .const import (
CONF_FALLBACK,
DATA,
DOMAIN,
SIGNAL_TADO_UPDATE_RECEIVED,
UPDATE_LISTENER,
UPDATE_TRACK,
)
_LOGGER = logging.getLogger(__name__)
TADO_COMPONENTS = ["sensor", "climate", "water_heater"]
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
SCAN_INTERVAL = timedelta(seconds=15)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_FALLBACK, default=True): cv.boolean,
}
],
)
},
extra=vol.ALLOW_EXTRA,
)
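# Illustrative YAML-import entry accepted by CONFIG_SCHEMA above, assuming DOMAIN is
# "tado" (placeholder values; the UI config flow is the usual setup path, and this
# import merely feeds it via SOURCE_IMPORT):
#
#   tado:
#     - username: YOUR_TADO_USERNAME
#       password: YOUR_TADO_PASSWORD
#       fallback: true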
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Tado component."""
hass.data.setdefault(DOMAIN, {})
if DOMAIN not in config:
return True
for conf in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf,
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Tado from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
fallback = entry.options.get(CONF_FALLBACK, True)
tadoconnector = TadoConnector(hass, username, password, fallback)
try:
await hass.async_add_executor_job(tadoconnector.setup)
except KeyError:
_LOGGER.error("Failed to login to tado")
return False
except RuntimeError as exc:
_LOGGER.error("Failed to setup tado: %s", exc)
raise ConfigEntryNotReady
except requests.exceptions.HTTPError as ex:
if ex.response.status_code > 400 and ex.response.status_code < 500:
_LOGGER.error("Failed to login to tado: %s", ex)
return False
raise ConfigEntryNotReady
# Do first update
await hass.async_add_executor_job(tadoconnector.update)
# Poll for updates in the background
update_track = async_track_time_interval(
hass, lambda now: tadoconnector.update(), SCAN_INTERVAL,
)
update_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
DATA: tadoconnector,
UPDATE_TRACK: update_track,
UPDATE_LISTENER: update_listener,
}
for component in TADO_COMPONENTS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
if CONF_FALLBACK not in options:
options[CONF_FALLBACK] = entry.data.get(CONF_FALLBACK, True)
hass.config_entries.async_update_entry(entry, options=options)
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in TADO_COMPONENTS
]
)
)
hass.data[DOMAIN][entry.entry_id][UPDATE_TRACK]()
hass.data[DOMAIN][entry.entry_id][UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class TadoConnector:
"""An object to store the Tado data."""
def __init__(self, hass, username, password, fallback):
"""Initialize Tado Connector."""
self.hass = hass
self._username = username
self._password = password
self._fallback = fallback
self.device_id = None
self.tado = None
self.zones = None
self.devices = None
self.data = {
"zone": {},
"device": {},
}
@property
def fallback(self):
"""Return fallback flag to Smart Schedule."""
return self._fallback
def setup(self):
"""Connect to Tado and fetch the zones."""
self.tado = Tado(self._username, self._password)
self.tado.setDebugging(True)
# Load zones and devices
self.zones = self.tado.getZones()
self.devices = self.tado.getMe()["homes"]
self.device_id = self.devices[0]["id"]
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update the registered zones."""
for zone in self.zones:
self.update_sensor("zone", zone["id"])
for device in self.devices:
self.update_sensor("device", device["id"])
def update_sensor(self, sensor_type, sensor):
"""Update the internal data from Tado."""
_LOGGER.debug("Updating %s %s", sensor_type, sensor)
try:
if sensor_type == "zone":
data = self.tado.getZoneState(sensor)
elif sensor_type == "device":
devices_data = self.tado.getDevices()
if not devices_data:
_LOGGER.info("There are no devices to setup on this tado account")
return
data = devices_data[0]
else:
_LOGGER.debug("Unknown sensor: %s", sensor_type)
return
except RuntimeError:
_LOGGER.error(
"Unable to connect to Tado while updating %s %s", sensor_type, sensor,
)
return
self.data[sensor_type][sensor] = data
_LOGGER.debug(
"Dispatching update to %s %s %s: %s",
self.device_id,
sensor_type,
sensor,
data,
)
dispatcher_send(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(self.device_id, sensor_type, sensor),
)
def get_capabilities(self, zone_id):
"""Return the capabilities of the devices."""
return self.tado.getCapabilities(zone_id)
def reset_zone_overlay(self, zone_id):
"""Reset the zone back to the default operation."""
self.tado.resetZoneOverlay(zone_id)
self.update_sensor("zone", zone_id)
def set_presence(
self, presence=PRESET_HOME,
):
"""Set the presence to home or away."""
if presence == PRESET_AWAY:
self.tado.setAway()
elif presence == PRESET_HOME:
self.tado.setHome()
def set_zone_overlay(
self,
zone_id=None,
overlay_mode=None,
temperature=None,
duration=None,
device_type="HEATING",
mode=None,
fan_speed=None,
swing=None,
):
"""Set a zone overlay."""
_LOGGER.debug(
"Set overlay for zone %s: overlay_mode=%s, temp=%s, duration=%s, type=%s, mode=%s fan_speed=%s swing=%s",
zone_id,
overlay_mode,
temperature,
duration,
device_type,
mode,
fan_speed,
swing,
)
try:
self.tado.setZoneOverlay(
zone_id,
overlay_mode,
temperature,
duration,
device_type,
"ON",
mode,
fanSpeed=fan_speed,
swing=swing,
)
except RequestException as exc:
_LOGGER.error("Could not set zone overlay: %s", exc)
self.update_sensor("zone", zone_id)
def set_zone_off(self, zone_id, overlay_mode, device_type="HEATING"):
"""Set a zone to off."""
try:
self.tado.setZoneOverlay(
zone_id, overlay_mode, None, None, device_type, "OFF"
)
except RequestException as exc:
_LOGGER.error("Could not set zone overlay: %s", exc)
self.update_sensor("zone", zone_id)
|
|
#!/usr/bin/env python3
# coding=utf-8
"""
Integration tests for the complete framework
This file automatically discovers all Compiler subclasses in 'plugins/${package}/tests/*' and runs all programs
against them and their declared plugins concurrently.
"""
from contextlib import suppress
import importlib
import logging
import multiprocessing
import os
import shutil
import time
import unittest
from install import main
from lib import get_subclasses
from lib.configuration import dependencies
from lib.constants import PLUGINS_PATH, ROOT_PATH
from lib.exceptions import ProgramNotInstalledException
from lib.logger import setup_logging
from lib.parsers.configuration import get_global_conf, get_trigger_conf
from lib.plugins import BasePlugin
import run
from tests import TEST_DIRECTORY, SAVE_DIRECTORY
from tests.lib.structures import Compiler, Program, Plugin
__author__ = "Benjamin Schubert, benjamin.schubert@epfl.ch"
# prepare a manager for synchronization
RESOURCES_MANAGER = multiprocessing.Manager() # pylint: disable=no-member
def bound_value(value: int, minimum: int = 0, maximum: int = 9) -> int:
"""
Bounds a value between an upper and lower bound
:param value: the value to bound
:param minimum: the minimal allowed value
:param maximum: the maximal allowed value
:return: the bounded value
"""
return minimum if value < minimum else maximum if value > maximum else value
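# Illustrative values with the default bounds: bound_value(-3) == 0,
# bound_value(4) == 4 and bound_value(12) == 9.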
class TestRunner(unittest.TestCase):
"""
The Test runner, containing all integration tests, numbered
"""
log_directory = os.path.join(get_global_conf().getdir("trigger", "default_directory"), "tests-results", "logs")
_multiprocess_can_split_ = True
@classmethod
def setUpClass(cls) -> None:
"""
The class setup, ensures the log directory is ready
"""
# pylint: disable=no-member
get_global_conf().set("install", "make_args", "-j,-l{}".format(multiprocessing.cpu_count()))
setup_logging()
class EnvManager: # pylint: disable=too-few-public-methods
"""
An environment manager for the runs. Automatically saves the logs of failing runs
"""
def __init__(self, _compiler_: Compiler, file_suffix: str):
self.compiler = _compiler_
self.filename = "{}-{}-{}-{}.txt".format(
_compiler_.package, _compiler_.name, "{}wllvm".format("" if _compiler_.bitcode else "no-"), file_suffix
)
def __enter__(self):
logging.getLogger().setLevel(0)
wllvm = "wllvm" if self.compiler.bitcode else "no-wllvm"
get_global_conf().set("install", "compiler", "{}.{}".format(self.compiler.package, self.compiler.name))
get_global_conf().set("install", "llvm_bitcode", str(self.compiler.bitcode))
get_global_conf()["DEFAULT"]["default_directory"] = \
os.path.join(TEST_DIRECTORY, self.compiler.package, self.compiler.name, wllvm)
get_global_conf().set("install", "source_directory", os.path.join(ROOT_PATH, "src"))
handlers = logging.getLogger().handlers
while len(handlers) > 0:
handlers[0].close()
logging.getLogger().removeHandler(handlers[0])
logging.getLogger().addHandler(logging.FileHandler(os.path.join(TestRunner.log_directory, self.filename)))
get_global_conf().set(
"plugins",
"enabled_plugins",
",".join(["{}.{}".format(plugin.package, plugin.name) for plugin in self.compiler.plugins])
)
for plugin in self.compiler.plugins:
importlib.import_module("plugins.{}.{}".format(plugin.package, plugin.name))
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None: # pragma nocover as this is only on fail and our tests should not fail
shutil.move(
os.path.join(TestRunner.log_directory, self.filename),
os.path.join(SAVE_DIRECTORY, self.filename)
) # pragma nocover idem
def configure(self, compiler: Compiler) -> None:
"""
Configures the environment to run with the given compiler
:param compiler: the compiler to use
"""
try:
with TestRunner.EnvManager(compiler, "configure"):
self.assertFalse(
dependencies.install(False), "Could not install dependencies for {} with{} bitcode".format(
compiler.name, "out" if compiler.bitcode else ""
)
)
for plugin_info in compiler.plugins:
log_file = "{}-{}-configuration".format(plugin_info.package, plugin_info.name)
with TestRunner.EnvManager(compiler, log_file):
plugin = [
subclass for subclass in get_subclasses(BasePlugin)
if subclass.__name__.lower() == plugin_info.name
][0]
self.assertFalse(plugin().configure(force=False))
finally:
compiler.is_configured.set()
def compile(self, _compiler_: Compiler, _program_: Program) -> None:
"""
Compiles and installs the given program with the given compiler
:param _compiler_: the compiler to use
:param _program_: the program to compile
"""
_compiler_.is_configured.wait()
try:
with TestRunner.EnvManager(_compiler_, _program_.name):
error = main([_program_.name], True, 1)
self.assertFalse(error, "The program {} failed to compile with {}".format(
_program_.name, get_global_conf().get("install", "compiler")))
# Checks that bitcode was indeed created at the correct place
if _compiler_.bitcode:
conf = get_trigger_conf(_program_.name)
self.assertTrue(os.path.exists(conf.get_executable() + ".bc"))
finally:
_program_.is_installed.set()
def run_main_plugin(self, _compiler_: Compiler, _plugin_: Plugin, _program_: Program):
"""
Runs the given plugin against the given program
:param _compiler_: the compiler used to compile the program
:param _plugin_: the plugin to test
:param _program_: the program to test
"""
_program_.is_installed.wait()
with _program_.lock, TestRunner.EnvManager(_compiler_, "{}-{}".format(_plugin_.name, _program_.name)):
plugin = [
subclass for subclass in get_subclasses(BasePlugin)
if subclass.__name__.lower() == _plugin_.name
][0]
if hasattr(_plugin_, "main_plugin"):
analysis_plugins = [plugin]
plugin = _plugin_.main_plugin
else:
analysis_plugins = []
_plugin_.pre_run()
# noinspection PyBroadException
try:
self.assertFalse(run.trigger_bug(_program_.name, plugin(), analysis_plugins=analysis_plugins))
except ProgramNotInstalledException:
raise unittest.SkipTest("{} is not installed".format(_program_.name))
except Exception: # with concurrency, tests might fail. Let's retry once
time.sleep(2) # let's sleep a bit before, timing might be bad
self.assertFalse(run.trigger_bug(_program_.name, plugin(), analysis_plugins=analysis_plugins))
time.sleep(2)
def load_compilers() -> None:
"""
Imports all test modules in the plugins packages, so that Compiler subclasses can be discovered
"""
for package in os.listdir(PLUGINS_PATH):
if os.path.isdir(os.path.join(PLUGINS_PATH, package)) and package != "__pycache__":
for test_file in os.listdir(os.path.join(PLUGINS_PATH, package, "tests")):
importlib.import_module("plugins.{}.tests.{}".format(package, os.path.splitext(test_file)[0]))
def add_plugin_run(_compiler_: Compiler, _program_: Program, plugin: Plugin) -> None:
"""
Adds a plugin to run against the given program
:param _compiler_: the compiler used to compile the program
:param _program_: the program to run the plugin against
:param plugin: the plugin to register
"""
function_name = "test_9{}_{}_{}".format(bound_value(plugin.priority), plugin.name, _program_.name)
setattr(
TestRunner,
function_name,
lambda x, comp=_compiler_, prog=_program_, plug=plugin: TestRunner.run_main_plugin(x, comp, plug, prog)
)
setattr(getattr(TestRunner, function_name), "__name__", function_name)
def add_programs_compile(compiler: Compiler) -> None:
"""
For each program, register a compile test with the given compiler and a plugin run for each of its plugins
:param compiler: the compiler to use
"""
for program_name in get_global_conf().getlist("install", "programs"):
program = Program(program_name, RESOURCES_MANAGER.Lock(), RESOURCES_MANAGER.Event())
function_name = "test_5{}_{}_{}_{}wllvm_{}".format(
bound_value(compiler.priority),
compiler.package,
compiler.name,
"no-" if not compiler.bitcode else "",
program.name
)
setattr(
TestRunner,
function_name,
lambda x, comp=compiler, prog=program: TestRunner.compile(x, comp, prog)
)
setattr(getattr(TestRunner, function_name), "__name__", function_name)
for plugin in compiler.plugins:
add_plugin_run(compiler, program, plugin)
def add_compilers(_compilers_: list) -> None:
"""
For each compiler in _compilers_, add a configure test for it and register all programs against it
:param _compilers_: the list of compilers to use
"""
for compiler_class in _compilers_:
compiler = compiler_class(RESOURCES_MANAGER.Event())
function_name = \
"test_1{}_{}_{}_{}wllvm".format(
bound_value(compiler.priority),
compiler.package,
compiler.name,
"no-" if not compiler.bitcode else ""
)
setattr(TestRunner, function_name, lambda x, comp=compiler: TestRunner.configure(x, comp))
setattr(getattr(TestRunner, function_name), "__name__", function_name)
add_programs_compile(compiler)
def clean_working_directory() -> None:
""" Removes old logs before running """
with suppress(FileNotFoundError):
shutil.rmtree(TestRunner.log_directory)
with suppress(FileNotFoundError):
shutil.rmtree(SAVE_DIRECTORY)
os.makedirs(TestRunner.log_directory)
os.makedirs(SAVE_DIRECTORY)
# Add all functions to TestRunner on initialization
clean_working_directory()
load_compilers()
COMPILERS = get_subclasses(Compiler)
add_compilers(COMPILERS)
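# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the suite above; _DemoCase and _make_test
# are hypothetical names): add_compilers/add_programs_compile/add_plugin_run
# all rely on attaching closures as named test methods before unittest
# discovery runs, essentially:
#
#   import unittest
#
#   class _DemoCase(unittest.TestCase):
#       pass
#
#   def _make_test(value):
#       def _test(self, val=value):
#           self.assertGreaterEqual(val, 0)
#       return _test
#
#   for i in range(3):
#       name = "test_generated_{}".format(i)
#       setattr(_DemoCase, name, _make_test(i))
#       getattr(_DemoCase, name).__name__ = name
#
# The default arguments (comp=..., prog=..., plug=...) used above serve the
# same purpose as val=value here: binding the loop variables at definition
# time rather than at call time.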
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
from datetime import datetime
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, File
import metron_service
import metron_security
# Wrap major operations and functionality in this class
class IndexingCommands:
__params = None
__indexing_topic = None
__indexing_topology = None
__configured = False
__acl_configured = False
__hdfs_perm_configured = False
__hbase_configured = False
__hbase_acl_configured = False
def __init__(self, params):
if params is None:
raise ValueError("params argument is required for initialization")
self.__params = params
self.__indexing_topology = params.metron_indexing_topology
self.__indexing_topic = params.indexing_input_topic
self.__configured = os.path.isfile(self.__params.indexing_configured_flag_file)
self.__acl_configured = os.path.isfile(self.__params.indexing_acl_configured_flag_file)
self.__hbase_configured = os.path.isfile(self.__params.indexing_hbase_configured_flag_file)
self.__hbase_acl_configured = os.path.isfile(self.__params.indexing_hbase_acl_configured_flag_file)
self.__hdfs_perm_configured = os.path.isfile(self.__params.indexing_hdfs_perm_configured_flag_file)
def is_configured(self):
return self.__configured
def is_acl_configured(self):
return self.__acl_configured
def is_hdfs_perm_configured(self):
return self.__hdfs_perm_configured
def is_hbase_configured(self):
return self.__hbase_configured
def is_hbase_acl_configured(self):
return self.__hbase_acl_configured
def set_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_configured_flag_file, "Setting Indexing configured to True")
def set_hbase_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hbase_configured_flag_file, "Setting HBase configured to True for indexing")
def set_hbase_acl_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hbase_acl_configured_flag_file, "Setting HBase ACL configured to True for indexing")
def set_acl_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_acl_configured_flag_file, "Setting Indexing ACL configured to True")
def set_hdfs_perm_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hdfs_perm_configured_flag_file, "Setting HDFS perm configured to True")
def create_hbase_tables(self):
Logger.info("Creating HBase Tables for indexing")
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.hbase_keytab_path,
self.__params.hbase_principal_name,
execute_user=self.__params.hbase_user)
cmd = "echo \"create '{0}','{1}'\" | hbase shell -n"
add_update_cmd = cmd.format(self.__params.update_hbase_table, self.__params.update_hbase_cf)
Execute(add_update_cmd,
tries=3,
try_sleep=5,
logoutput=False,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=self.__params.hbase_user
)
Logger.info("Done creating HBase Tables for indexing")
self.set_hbase_configured()
def set_hbase_acls(self):
Logger.info("Setting HBase ACLs for indexing")
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.hbase_keytab_path,
self.__params.hbase_principal_name,
execute_user=self.__params.hbase_user)
cmd = "echo \"grant '{0}', 'RW', '{1}'\" | hbase shell -n"
add_update_acl_cmd = cmd.format(self.__params.metron_user, self.__params.update_hbase_table)
Execute(add_update_acl_cmd,
tries=3,
try_sleep=5,
logoutput=False,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=self.__params.hbase_user
)
Logger.info("Done setting HBase ACLs for indexing")
self.set_hbase_acl_configured()
def init_kafka_topics(self):
Logger.info('Creating Kafka topics for indexing')
metron_service.init_kafka_topics(self.__params, [self.__indexing_topic])
def init_kafka_acls(self):
Logger.info('Creating Kafka ACLs for indexing')
# The indexing topic name is also used as the consumer group name
metron_service.init_kafka_acls(self.__params, [self.__indexing_topic], [self.__indexing_topic])
def init_hdfs_dir(self):
Logger.info('Setting up HDFS indexing directory')
# Non Kerberized Metron runs under 'storm', requiring write under the 'hadoop' group.
# Kerberized Metron runs under its own user.
ownership = 0755 if self.__params.security_enabled else 0775
Logger.info('HDFS indexing directory ownership is: ' + str(ownership))
self.__params.HdfsResource(self.__params.metron_apps_indexed_hdfs_dir,
type="directory",
action="create_on_execute",
owner=self.__params.metron_user,
group=self.__params.hadoop_group,
mode=ownership,
)
Logger.info('Done creating HDFS indexing directory')
def start_indexing_topology(self, env):
Logger.info('Starting ' + self.__indexing_topology)
if not self.is_topology_active(env):
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
start_cmd_template = """{0}/bin/start_elasticsearch_topology.sh \
-s {1} \
-z {2}"""
start_cmd = start_cmd_template.format(self.__params.metron_home,
self.__indexing_topology,
self.__params.zookeeper_quorum)
Execute(start_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
else:
Logger.info('Indexing topology already running')
Logger.info('Finished starting indexing topology')
def stop_indexing_topology(self, env):
Logger.info('Stopping ' + self.__indexing_topology)
if self.is_topology_active(env):
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
stop_cmd = 'storm kill ' + self.__indexing_topology
Execute(stop_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
else:
Logger.info("Indexing topology already stopped")
Logger.info('Done stopping indexing topologies')
def restart_indexing_topology(self, env):
Logger.info('Restarting the indexing topologies')
self.stop_indexing_topology(env)
# Wait for old topology to be cleaned up by Storm, before starting again.
retries = 0
topology_active = self.is_topology_active(env)
while self.is_topology_active(env) and retries < 3:
Logger.info('Existing topology still active. Will wait and retry')
time.sleep(10)
retries += 1
if not topology_active:
Logger.info('Waiting for storm kill to complete')
time.sleep(30)
self.start_indexing_topology(env)
Logger.info('Done restarting the indexing topologies')
else:
Logger.warning('Retries exhausted. Existing topology not cleaned up. Aborting topology start.')
def is_topology_active(self, env):
env.set_params(self.__params)
active = True
topologies = metron_service.get_running_topologies(self.__params)
is_running = False
if self.__indexing_topology in topologies:
is_running = topologies[self.__indexing_topology] in ['ACTIVE', 'REBALANCING']
active &= is_running
return active
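# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the Ambari service scripts):
# a master script would typically drive IndexingCommands roughly like this,
# where `params` is the Ambari-generated params module and `env` is the
# script environment handed in by resource_management:
#
#   commands = IndexingCommands(params)
#   if not commands.is_configured():
#       commands.init_kafka_topics()
#       commands.init_hdfs_dir()
#       commands.set_configured()
#   if params.security_enabled and not commands.is_acl_configured():
#       commands.init_kafka_acls()
#       commands.set_acl_configured()
#   if not commands.is_hbase_configured():
#       commands.create_hbase_tables()
#   commands.start_indexing_topology(env)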
|
|
import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.utils import six
from django.utils.translation import string_concat, ugettext_lazy as _
__all__ = ['ArrayField']
class AttributeSetter(object):
def __init__(self, name, value):
setattr(self, name, value)
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
super(ArrayField, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ArrayField, self).check(**kwargs)
if self.base_field.rel:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
hint=None,
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
hint=None,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super(ArrayField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, list) or isinstance(value, tuple):
return [self.base_field.get_db_prep_value(i, connection, prepared) for i in value]
return value
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
if path == 'django.contrib.postgres.fields.array.ArrayField':
path = 'django.contrib.postgres.fields.ArrayField'
kwargs.update({
'base_field': self.base_field,
'size': self.size,
})
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def value_to_string(self, obj):
values = []
vals = self._get_val_from_obj(obj)
base_field = self.base_field
for val in vals:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end)  # no +1 here: PostgreSQL slice bounds are 1-based and inclusive of the upper end
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
def validate(self, value, model_instance):
super(ArrayField, self).validate(value, model_instance)
for i, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as e:
raise exceptions.ValidationError(
string_concat(self.error_messages['item_invalid'], e.message),
code='item_invalid',
params={'nth': i},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ArrayField, self).formfield(**defaults)
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
def as_sql(self, qn, connection):
sql, params = super(ArrayContains, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
def as_sql(self, qn, connection):
sql, params = super(ArrayContainedBy, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
def as_sql(self, qn, connection):
sql, params = super(ArrayOverlap, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
lookup_name = 'len'
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return 'array_length(%s, 1)' % lhs, params
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory(object):
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
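# ---------------------------------------------------------------------------
# Illustrative usage sketch (the Post model below is hypothetical): ArrayField
# wraps a base field, and the transforms registered above translate index and
# slice lookups into 1-based PostgreSQL subscripts.
#
#   from django.contrib.postgres.fields import ArrayField
#   from django.db import models
#
#   class Post(models.Model):
#       tags = ArrayField(models.CharField(max_length=32), size=8)
#
#   Post.objects.filter(tags__contains=['django'])  # @> with an array cast
#   Post.objects.filter(tags__len=3)                # array_length(tags, 1)
#   Post.objects.filter(tags__0='first')            # tags[1] in SQL
#   Post.objects.filter(tags__0_2=['a', 'b'])       # tags[1:2] slice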
|
|
import textwrap
import unittest
from collections import OrderedDict
from parameterized import parameterized
from conans.paths import CONANFILE
from conans.test.utils.tools import NO_SETTINGS_PACKAGE_ID, TestClient, TestServer, \
inc_package_manifest_timestamp, inc_recipe_manifest_timestamp
class VersionRangesUpdatingTest(unittest.TestCase):
def test_update_remote(self):
# https://github.com/conan-io/conan/issues/5333
client = TestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
conanfile = textwrap.dedent("""
from conans import ConanFile
class Boost(ConanFile):
pass
""")
client.save({"conanfile.py": conanfile})
client.run("create . boost/1.68.0@lasote/stable")
client.run("create . boost/1.69.0@lasote/stable")
client.run("create . boost/1.70.0@lasote/stable")
client.run("upload * -r=default --all --confirm")
client.run("remove * -f")
conanfile = textwrap.dedent("""
[requires]
boost/[>=1.68.0]@lasote/stable
""")
client.save({"conanfile.txt": conanfile}, clean_first=True)
client.run("install .")
self.assertIn("boost/*@lasote/stable versions found in 'default' remote", client.out)
self.assertIn("resolved to 'boost/1.70.0@lasote/stable' in remote 'default'", client.out)
self.assertNotIn("boost/1.69.0", client.out)
self.assertNotIn("boost/1.68.0", client.out)
client.run("install .")
self.assertIn("resolved to 'boost/1.70.0@lasote/stable' in local cache", client.out)
self.assertIn("boost/1.70.0", client.out)
self.assertNotIn("boost/1.69.0", client.out)
self.assertNotIn("boost/1.68.0", client.out)
client.run("install . --update")
self.assertIn("resolved to 'boost/1.70.0@lasote/stable' in remote 'default'", client.out)
self.assertIn("boost/1.70.0", client.out)
self.assertNotIn("boost/1.69.0", client.out)
self.assertNotIn("boost/1.68.0", client.out)
def test_update(self):
client = TestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
conanfile = """from conans import ConanFile
class HelloReuseConan(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/1.1@lasote/testing")
client.run("create . Pkg/1.2@lasote/testing")
client.run("upload Pkg* -r=default --all --confirm")
client.run("remove Pkg/1.2@lasote/testing -f")
conanfile = """from conans import ConanFile
class HelloReuseConan(ConanFile):
requires = "Pkg/[~1]@lasote/testing"
"""
client.save({"conanfile.py": conanfile})
client.run("install .")
# Resolves to local package
self.assertIn("Pkg/1.1@lasote/testing: Already installed!", client.out)
client.run("install . --update")
# Resolves to remote package
self.assertIn("Pkg/1.2@lasote/testing: Package installed", client.out)
self.assertNotIn("Pkg/1.1", client.out)
# Remove the packages from the remote
client.run("remove Pkg* -r=default --f")
# Resolves to local package
client.run("install .")
self.assertIn("Pkg/1.2@lasote/testing: Already installed!", client.out)
# Update also resolves to local package
client.run("install . --update")
self.assertIn("Pkg/1.2@lasote/testing: Already installed!", client.out)
self.assertNotIn("Pkg/1.1", client.out)
def test_update_pkg(self):
server = TestServer()
client = TestClient(servers={"default": server},
users={"default": [("lasote", "mypass")]})
conanfile = """from conans import ConanFile
class HelloReuseConan(ConanFile):
def package_info(self):
self.output.info("PACKAGE_INFO {}")
"""
client.save({"conanfile.py": conanfile.format("1.1")})
client.run("create . Pkg/1.1@lasote/testing")
client.save({"conanfile.py": conanfile.format("1.2")})
client.run("create . Pkg/1.2@lasote/testing")
client.run("upload Pkg* -r=default --all --confirm")
consumer = """from conans import ConanFile
class HelloReuseConan(ConanFile):
requires = "Pkg/[~1]@lasote/testing"
"""
client.save({"conanfile.py": consumer})
client.run("install .")
# Resolves to local package
self.assertIn("Pkg/1.2@lasote/testing: Already installed!", client.out)
self.assertIn("Pkg/1.2@lasote/testing: PACKAGE_INFO 1.2", client.out)
# modify remote 1.2
client2 = TestClient(servers={"default": server},
users={"default": [("lasote", "mypass")]})
client2.save({"conanfile.py": conanfile.format("*1.2*")})
client2.run("create . Pkg/1.2@lasote/testing")
# Make sure the manifest timestamp increases; on some test machines the
# update check can fail because both manifests get the same timestamp
inc_recipe_manifest_timestamp(client2.cache, "Pkg/1.2@lasote/testing", 1)
inc_package_manifest_timestamp(client2.cache,
"Pkg/1.2@lasote/testing:%s" % NO_SETTINGS_PACKAGE_ID,
1)
client2.run("upload Pkg* -r=default --all --confirm")
client.run("install .")
# Resolves to local package
self.assertIn("Pkg/1.2@lasote/testing: Already installed!", client.out)
self.assertIn("Pkg/1.2@lasote/testing: PACKAGE_INFO 1.2", client.out)
client.run("install . --update")
# Resolves to remote new recipe and package
self.assertIn("Pkg/1.2@lasote/testing: Package installed", client.out)
self.assertNotIn("Pkg/1.2@lasote/testing: PACKAGE_INFO 1.2", client.out)
self.assertIn("Pkg/1.2@lasote/testing: PACKAGE_INFO *1.2*", client.out)
class VersionRangesMultiRemoteTest(unittest.TestCase):
def setUp(self):
self.servers = OrderedDict()
self.servers["default"] = TestServer()
self.servers["other"] = TestServer()
self.client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")],
"other": [("lasote", "mypass")]})
def _export(self, name, version, deps=None, export=True, upload=True, remote="default"):
deps = ", ".join(['"%s"' % d for d in deps or []]) or '""'
conanfile = """
from conans import ConanFile, CMake
import os
class HelloReuseConan(ConanFile):
name = "%s"
version = "%s"
requires = %s
""" % (name, version, deps)
files = {CONANFILE: conanfile}
self.client.save(files, clean_first=True)
if export:
self.client.run("export . lasote/stable")
if upload:
self.client.run("upload %s/%s@lasote/stable -r=%s" % (name, version, remote))
def test_resolve_from_remotes(self):
self._export("Hello0", "0.1")
self._export("Hello0", "0.2")
self._export("Hello0", "0.3", remote="other")
self._export("Hello1", "0.1", ["Hello0/[>0.1,<0.4]@lasote/stable"], export=False,
upload=False)
for remote, solution in [("default", "0.2"), ("other", "0.3")]:
self.client.run('remove "Hello0/0.*" -f')
self.client.run("install . --build missing -r=%s" % remote)
self.assertIn("Version range '>0.1,<0.4' required by "
"'conanfile.py (Hello1/0.1)' "
"resolved to 'Hello0/%s@lasote/stable'" % solution,
self.client.out)
self.assertIn("conanfile.py (Hello1/0.1): Generated conaninfo.txt",
self.client.out)
content = self.client.load("conaninfo.txt")
self.assertIn("Hello0/%s@lasote/stable" % solution, content)
class VersionRangesDiamondTest(unittest.TestCase):
def setUp(self):
test_server = TestServer()
self.servers = {"default": test_server}
self.client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
def _export(self, name, version, deps=None, export=True, upload=True):
deps = ", ".join(['"%s"' % d for d in deps or []]) or '""'
conanfile = """
from conans import ConanFile, CMake
import os
class HelloReuseConan(ConanFile):
name = "%s"
version = "%s"
requires = %s
""" % (name, version, deps)
files = {CONANFILE: conanfile}
self.client.save(files, clean_first=True)
if export:
self.client.run("export . lasote/stable")
if upload:
self.client.run("upload %s/%s@lasote/stable" % (name, version))
def test_local_then_remote(self):
self._export("Hello0", "0.1")
self._export("Hello0", "0.2")
self._export("Hello0", "0.3")
self._export("Hello0", "1.4")
self._export("Hello1", "0.1", ["Hello0/[>0.1,<0.3]@lasote/stable"], export=False,
upload=False)
self.client.run('remove "Hello0/0.*" -f')
self.client.run("install . --build missing")
self.assertIn("Version range '>0.1,<0.3' required by 'conanfile.py (Hello1/0.1)' "
"resolved to 'Hello0/0.2@lasote/stable'", self.client.out)
self.assertIn("conanfile.py (Hello1/0.1): Generated conaninfo.txt",
self.client.out)
content = self.client.load("conaninfo.txt")
self.assertIn("Hello0/0.2@lasote/stable", content)
@parameterized.expand([(False, ), (True,)])
def test_reuse(self, upload):
self._export("Hello0", "0.1", upload=upload)
self._export("Hello0", "0.2", upload=upload)
self._export("Hello0", "0.3", upload=upload)
self._export("Hello1", "0.1", ["Hello0/[>0.1,<0.3]@lasote/stable"], upload=upload)
self._export("Hello2", "0.1", ["Hello0/[0.2]@lasote/stable"], upload=upload)
self._export("Hello3", "0.1", ["Hello1/[>=0]@lasote/stable", "Hello2/[~=0]@lasote/stable"],
export=False, upload=upload)
if upload:
self.client.run('remove "*" -f')
self.client.run("install . --build missing")
def check1():
self.assertIn("Version range '~=0' required by 'conanfile.py (Hello3/0.1)' "
"resolved to 'Hello2/0.1@lasote/stable'", self.client.out)
self.assertIn("Version range '>0.1,<0.3' required by 'Hello1/0.1@lasote/stable' "
"resolved to 'Hello0/0.2@lasote/stable'", self.client.out)
self.assertIn("Version range '0.2' required by 'Hello2/0.1@lasote/stable' resolved "
"to 'Hello0/0.2@lasote/stable'", self.client.out)
self.assertNotIn("Conflict", self.client.out)
self.assertIn("conanfile.py (Hello3/0.1): Generated conaninfo.txt",
self.client.out)
content = self.client.load("conaninfo.txt")
self.assertIn("Hello0/0.2@lasote/stable", content)
self.assertIn("Hello1/0.1@lasote/stable", content)
self.assertIn("Hello2/0.1@lasote/stable", content)
check1()
if upload:
self._export("Hello0", "0.2.1", upload=upload)
self.client.run('remove Hello0/0.2.1@lasote/stable -f')
self._export("Hello3", "0.1", ["Hello1/[>=0]@lasote/stable",
"Hello2/[~=0]@lasote/stable"],
export=False, upload=upload)
self.client.run("install . --build missing")
check1()
# Now update
self.client.run("install . --update --build missing")
self.assertIn("Version range '~=0' required by 'conanfile.py (Hello3/0.1)' "
"resolved to 'Hello2/0.1@lasote/stable'", self.client.out)
self.assertIn("Version range '>0.1,<0.3' required by 'Hello1/0.1@lasote/stable' "
"resolved to 'Hello0/0.2.1@lasote/stable'", self.client.out)
self.assertIn("Version range '0.2' required by 'Hello2/0.1@lasote/stable' resolved "
"to 'Hello0/0.2.1@lasote/stable'", self.client.out)
self.assertNotIn("Conflict", self.client.out)
self.assertIn("conanfile.py (Hello3/0.1): Generated conaninfo.txt",
self.client.out)
content = self.client.load("conaninfo.txt")
self.assertIn("Hello0/0.2.1@lasote/stable", content)
self.assertIn("Hello1/0.1@lasote/stable", content)
self.assertIn("Hello2/0.1@lasote/stable", content)
def test_no_joint_compatibility_resolved(self):
"""Test to verify that conan is not resolving using joint-compatibility of the full graph
and you need to specify the right order or override downstream the conflict"""
self._export("ProblemRequirement", "1.0.0", upload=True)
self._export("ProblemRequirement", "1.1.0", upload=True)
self._export("RequirementOne", "1.2.3",
["ProblemRequirement/[=1.0.0]@lasote/stable"], upload=True)
self._export("RequirementTwo", "4.5.6",
["ProblemRequirement/[~1]@lasote/stable"], upload=True)
self._export("Project", "1.0.0",
["RequirementTwo/[=4.5.6]@lasote/stable",
"RequirementOne/[=1.2.3]@lasote/stable"], upload=True)
self.client.run("remove '*' -f")
self.client.run("install Project/1.0.0@lasote/stable --build missing", assert_error=True)
self.assertIn("Conflict in RequirementOne/1.2.3@lasote/stable:\n"
" 'RequirementOne/1.2.3@lasote/stable' requires "
"'ProblemRequirement/1.0.0@lasote/stable' while 'RequirementTwo/4.5.6@lasote/stable'"
" requires 'ProblemRequirement/1.1.0@lasote/stable'.\n"
" To fix this conflict you need to override the package 'ProblemRequirement' in "
"your root package.", self.client.out)
# Change the order, now it resolves correctly
self._export("Project", "1.0.0",
["RequirementOne/[=1.2.3]@lasote/stable",
"RequirementTwo/[=4.5.6]@lasote/stable",
], upload=True)
self.client.run("remove '*' -f")
self.client.run("install Project/1.0.0@lasote/stable --build missing")
|
|
#!/usr/bin/env python
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Tools to control application running via dev_appserver.py.
Useful for smoke and integration tests.
"""
# pylint: disable=no-init
from __future__ import print_function
import collections
import cookielib
import ctypes
import json
import logging
import os
import shutil
import socket
import subprocess
import sys
import time
from six.moves import urllib
from . import gae_sdk_utils
def terminate_with_parent():
"""Sets up current process to receive SIGTERM when its parent dies.
Works on Linux only; on Windows and macOS it is a no-op.
"""
try:
libc = ctypes.CDLL('libc.so.6')
except OSError:
return
PR_SET_PDEATHSIG = 1
SIGTERM = 15
try:
libc.prctl(PR_SET_PDEATHSIG, SIGTERM)
except AttributeError:
return
def is_port_free(host, port):
"""Returns True if the listening port number is available."""
s = socket.socket()
try:
# connect_ex returns 0 on success (i.e. port is being listened to).
return bool(s.connect_ex((host, port)))
finally:
s.close()
def find_free_ports(host, base_port, count):
"""Finds several consecutive listening ports free to listen to."""
while base_port < (1 << 16):  # ports are 16-bit; stop scanning past 65535
candidates = range(base_port, base_port + count)
if all(is_port_free(host, port) for port in candidates):
return candidates
base_port += len(candidates)
assert False, (
'Failed to find %d available ports starting at %d' % (count, base_port))
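# Illustrative sketch (not used by this module): callers usually reserve one
# port per service plus one for the admin server in a single call, e.g.
#
#   ports = find_free_ports('localhost', 9000, 3)
#   default_port, extra_service_port, admin_port = ports
#
# Reserving them together keeps the block consecutive, which is what
# LocalApplication.start() below relies on when it takes the first entry for
# the default service and the last one for the admin port.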
class LocalApplication(object):
"""GAE application running locally via dev_appserver.py."""
def __init__(self, app_dir, base_port, listen_all, root, app_id=None):
self._app = gae_sdk_utils.Application(app_dir, app_id)
self._base_port = base_port
self._client = None
self._log = None
self._port = None
self._proc = None
self._proc_callback = None
self._serving = False
self._root = os.path.join(root, self.app_id)
self._listen_all = listen_all
@property
def app_id(self):
"""Application ID as specified in app.yaml."""
return self._app.app_id
@property
def port(self):
"""Main HTTP port that serves requests to the 'default' service.
Valid only after app has started.
"""
return self._port
@property
def url(self):
"""Host URL."""
return 'http://localhost:%d' % self._port
@property
def client(self):
"""HttpClient that can be used to make requests to the instance."""
return self._client
@property
def log(self):
"""Returns the log output. Only set after calling stop()."""
return self._log
def start(self):
"""Starts dev_appserver process."""
assert not self._proc, 'Already running'
# Clear state.
self._client = None
self._log = None
self._serving = False
# Find available ports, one per service and one for app admin.
free_ports = find_free_ports(
'localhost', self._base_port, len(self._app.services) + 1)
self._port = free_ports[0]
os.makedirs(os.path.join(self._root, 'storage'))
# Launch the process.
log_file = os.path.join(self._root, 'dev_appserver.log')
logging.info(
'Launching %s at %s, log is %s', self.app_id, self.url, log_file)
cmd = [
'--port', str(self._port),
'--admin_port', str(free_ports[-1]),
'--storage_path', os.path.join(self._root, 'storage'),
'--automatic_restart', 'no',
'--log_level', 'debug',
# Note: The random policy will provide the same consistency every
# time the test is run because the random generator is always given
# the same seed.
'--datastore_consistency_policy', 'random',
]
if self._listen_all:
cmd.extend(('--host', '0.0.0.0'))
cmd.extend(('--admin_host', '0.0.0.0'))
cmd.extend(('--api_host', '0.0.0.0'))
cmd.extend(('--enable_host_checking', 'false'))
else:
# The default is 'localhost' EXCEPT if environment variable
# 'DEVSHELL_CLIENT_PORT' is set, then the default is '0.0.0.0'. Take no
# chance and always bind to localhost.
cmd.extend(('--host', 'localhost'))
cmd.extend(('--admin_host', 'localhost'))
cmd.extend(('--api_host', 'localhost'))
kwargs = {}
if sys.platform != 'win32':
kwargs['preexec_fn'] = terminate_with_parent
with open(log_file, 'wb') as f:
self._proc, self._proc_callback = self._app.spawn_dev_appserver(
cmd, stdout=f, stderr=subprocess.STDOUT, **kwargs)
# Create a client that can talk to the service.
self._client = HttpClient(self.url)
def ensure_serving(self, timeout=10):
"""Waits for the service to start responding."""
if self._serving:
return
if not self._proc:
self.start()
logging.info('Waiting for %s to become ready...', self.app_id)
deadline = time.time() + timeout
alive = False
while self._proc.poll() is None and time.time() < deadline:
try:
urllib.request.urlopen(self.url + '/_ah/warmup')
alive = True
break
except urllib.error.URLError as exc:
if isinstance(exc, urllib.error.HTTPError):
alive = True
break
time.sleep(0.05)
if not alive:
logging.error('Service %s didn\'t come online', self.app_id)
self.stop()
self.dump_log()
raise Exception('Failed to start %s' % self.app_id)
logging.info('Service %s is ready.', self.app_id)
self._serving = True
def stop(self):
"""Stops dev_appserver, collects its log.
Returns the process error code if applicable.
"""
if not self._proc:
return None
exit_code = self._proc.poll()
try:
logging.info('Stopping %s', self.app_id)
if self._proc.poll() is None:
try:
# Send SIGTERM.
self._proc.terminate()
except OSError:
pass
deadline = time.time() + 5
while self._proc.poll() is None and time.time() < deadline:
time.sleep(0.05)
exit_code = self._proc.poll()
if exit_code is None:
logging.error('Leaking PID %d', self._proc.pid)
finally:
with open(os.path.join(self._root, 'dev_appserver.log'), 'r') as f:
self._log = f.read()
self._client = None
self._port = None
self._proc = None
self._proc_callback()
self._serving = False
return exit_code
def wait(self):
"""Waits for the process to exit."""
self._proc.wait()
def dump_log(self):
"""Prints dev_appserver log to stderr, works only if app is stopped."""
print('-' * 60, file=sys.stderr)
print('dev_appserver.py log for %s' % self.app_id, file=sys.stderr)
print('-' * 60, file=sys.stderr)
for l in (self._log or '').strip('\n').splitlines():
sys.stderr.write(' %s\n' % l)
print('-' * 60, file=sys.stderr)
class CustomHTTPErrorHandler(urllib.request.HTTPDefaultErrorHandler):
"""Swallows exceptions that would be thrown on >30x HTTP status."""
def http_error_default(self, _request, response, _code, _msg, _hdrs):
return response
class HttpClient(object):
"""Makes HTTP requests to some instance of dev_appserver."""
# Return value of request(...) and json_request.
HttpResponse = collections.namedtuple(
'HttpResponse', ['http_code', 'body', 'headers'])
def __init__(self, url):
self._url = url
self._opener = urllib.request.build_opener(
CustomHTTPErrorHandler(),
urllib.request.HTTPCookieProcessor(cookielib.CookieJar()))
self._xsrf_token = None
def login_as_admin(self, user='test@example.com'):
"""Performs dev_appserver login as admin, modifies cookies."""
self.request('/_ah/login?email=%s&admin=True&action=Login' % user)
self._xsrf_token = None
def request(self, resource, body=None, headers=None, method=None):
"""Sends HTTP request."""
if not resource.startswith(self._url):
assert resource.startswith('/')
resource = self._url + resource
req = urllib.request.Request(resource, body, headers=(headers or {}))
if method:
req.get_method = lambda: method
resp = self._opener.open(req)
return self.HttpResponse(resp.getcode(), resp.read(), resp.info())
def json_request(self, resource, body=None, headers=None, method=None):
"""Sends HTTP request and returns deserialized JSON."""
if body is not None:
body = json.dumps(body)
headers = (headers or {}).copy()
headers['Content-Type'] = 'application/json; charset=UTF-8'
resp = self.request(resource, body, headers=headers, method=method)
try:
value = json.loads(resp.body)
except ValueError:
raise ValueError('Invalid JSON: %r' % resp.body)
return self.HttpResponse(resp.http_code, value, resp.headers)
@property
def url_opener(self):
"""Instance of urllib.request opener used by this class."""
return self._opener
@property
def xsrf_token(self):
"""Returns XSRF token for the service, fetching it if necessary.
It only works with apps that use 'auth' component.
"""
if self._xsrf_token is None:
resp = self.json_request(
'/auth/api/v1/accounts/self/xsrf_token',
body={},
headers={'X-XSRF-Token-Request': '1'})
self._xsrf_token = resp.body['xsrf_token'].encode('ascii')
return self._xsrf_token
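# ---------------------------------------------------------------------------
# Illustrative usage sketch (the app directory and port are hypothetical):
#
#   app = LocalApplication('/path/to/app_dir', 9000, False, '/tmp/gae-smoke')
#   try:
#       app.ensure_serving()                      # starts dev_appserver if needed
#       resp = app.client.request('/_ah/warmup')  # plain HTTP via the cookie-aware opener
#       assert resp.http_code == 200
#   finally:
#       app.stop()      # collects dev_appserver.log into app.log
#       app.dump_log()  # prints it to stderr for debugging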
|
|
import functools
from framework.auth import Auth
from website.archiver import (
StatResult, AggregateStatResult,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
ARCHIVER_FILE_NOT_FOUND,
)
from website.archiver.model import ArchiveJob
from website import (
mails,
settings
)
def send_archiver_size_exceeded_mails(src, user, stat_result):
mails.send_mail(
to_addr=settings.SUPPORT_EMAIL,
mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
user=user,
src=src,
stat_result=stat_result
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
user=user,
src=src,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_copy_error_mails(src, user, results):
mails.send_mail(
to_addr=settings.SUPPORT_EMAIL,
mail=mails.ARCHIVE_COPY_ERROR_DESK,
user=user,
src=src,
results=results,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_COPY_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_file_not_found_mails(src, user, results):
mails.send_mail(
to_addr=settings.SUPPORT_EMAIL,
mail=mails.ARCHIVE_FILE_NOT_FOUND_DESK,
user=user,
src=src,
results=results,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_FILE_NOT_FOUND_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_uncaught_error_mails(src, user, results):
mails.send_mail(
to_addr=settings.SUPPORT_EMAIL,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_DESK,
user=user,
src=src,
results=results,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def handle_archive_fail(reason, src, dst, user, result):
if reason == ARCHIVER_NETWORK_ERROR:
send_archiver_copy_error_mails(src, user, result)
elif reason == ARCHIVER_SIZE_EXCEEDED:
send_archiver_size_exceeded_mails(src, user, result)
elif reason == ARCHIVER_FILE_NOT_FOUND:
send_archiver_file_not_found_mails(src, user, result)
else: # reason == ARCHIVER_UNCAUGHT_ERROR
send_archiver_uncaught_error_mails(src, user, result)
dst.root.sanction.forcibly_reject()
dst.root.sanction.save()
dst.root.delete_registration_tree(save=True)
def archive_provider_for(node, user):
"""A generic function to get the archive provider for some node, user pair.
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.get_addon(settings.ARCHIVE_PROVIDER)
def has_archive_provider(node, user):
"""A generic function for checking whether or not some node, user pair has
an attached provider for archiving
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.has_addon(settings.ARCHIVE_PROVIDER)
def link_archive_provider(node, user):
"""A generic function for linking some node, user pair with the configured
archive provider
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
addon = node.get_or_add_addon(settings.ARCHIVE_PROVIDER, auth=Auth(user))
addon.on_add()
node.save()
def aggregate_file_tree_metadata(addon_short_name, fileobj_metadata, user):
"""Recursively traverse the addon's file tree and collect metadata in AggregateStatResult
:param addon_short_name: short name of the addon being examined
:param fileobj_metadata: file or folder metadata of current point of reference
in file tree
:param user: archive initiator
:return: top-most recursive call returns AggregateStatResult containing addon file tree metadata
"""
disk_usage = fileobj_metadata.get('size')
if fileobj_metadata['kind'] == 'file':
result = StatResult(
target_name=fileobj_metadata['name'],
target_id=fileobj_metadata['path'].lstrip('/'),
disk_usage=disk_usage or 0,
)
return result
else:
return AggregateStatResult(
target_id=fileobj_metadata['path'].lstrip('/'),
target_name=fileobj_metadata['name'],
targets=[aggregate_file_tree_metadata(addon_short_name, child, user) for child in fileobj_metadata.get('children', [])],
)
def before_archive(node, user):
link_archive_provider(node, user)
job = ArchiveJob(
src_node=node.registered_from,
dst_node=node,
initiator=user
)
job.set_targets()
def _do_get_file_map(file_tree):
"""Reduces a tree of folders and files into a list of (<sha256>, <file_metadata>) pairs
"""
file_map = []
stack = [file_tree]
while len(stack):
tree_node = stack.pop(0)
if tree_node['kind'] == 'file':
file_map.append((tree_node['extra']['hashes']['sha256'], tree_node))
else:
stack = stack + tree_node['children']
return file_map
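# Illustrative sketch (hypothetical data): given a file tree such as
#
#   {'kind': 'folder', 'children': [
#       {'kind': 'file', 'name': 'a.txt',
#        'extra': {'hashes': {'sha256': 'abc123'}}},
#   ]}
#
# _do_get_file_map flattens it breadth-first into
#   [('abc123', {'kind': 'file', 'name': 'a.txt', ...})]
# so that lookups by content hash become a linear scan over
# (sha256, metadata) pairs.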
def _memoize_get_file_map(func):
cache = {}
@functools.wraps(func)
def wrapper(node):
if node._id not in cache:
osf_storage = node.get_addon('osfstorage')
file_tree = osf_storage._get_file_tree(user=node.creator)
cache[node._id] = _do_get_file_map(file_tree)
return func(node, cache[node._id])
return wrapper
@_memoize_get_file_map
def get_file_map(node, file_map):
"""
note:: file_map is injected implicitly by the decorator; this method is called like:
get_file_map(node)
"""
for (key, value) in file_map:
yield (key, value, node._id)
for child in node.nodes_primary:
for key, value, node_id in get_file_map(child):
yield (key, value, node_id)
def find_registration_file(value, node):
from website.models import Node
orig_sha256 = value['sha256']
orig_name = value['selectedFileName']
orig_node = value['nodeId']
file_map = get_file_map(node)
for sha256, value, node_id in file_map:
registered_from_id = Node.load(node_id).registered_from._id
if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == value['name']:
return value, node_id
return None, None
def find_registration_files(values, node):
ret = []
for i in range(len(values.get('extra', []))):
ret.append(find_registration_file(values['extra'][i], node) + (i,))
return ret
def find_question(schema, qid):
for page in schema['pages']:
questions = {
q['qid']: q
for q in page['questions']
}
if qid in questions:
return questions[qid]
def find_selected_files(schema, metadata):
targets = []
paths = [('', p) for p in schema.schema['pages']]
while len(paths):
prefix, path = paths.pop(0)
if path.get('questions'):
paths = paths + [('', q) for q in path['questions']]
elif path.get('type'):
qid = path.get('qid', path.get('id'))
if path['type'] == 'object':
paths = paths + [('{}.{}.value'.format(prefix, qid), p) for p in path['properties']]
elif path['type'] == 'osf-upload':
targets.append('{}.{}'.format(prefix, qid).lstrip('.'))
selected = {}
for t in targets:
parts = t.split('.')
value = metadata.get(parts.pop(0))
while value and len(parts):
value = value.get(parts.pop(0))
if value:
selected[t] = value
return selected
VIEW_FILE_URL_TEMPLATE = '/project/{node_id}/files/osfstorage/{path}/'
def deep_get(obj, path):
parts = path.split('.')
item = obj
key = None
while len(parts):
key = parts.pop(0)
item[key] = item.get(key, {})
item = item[key]
return item
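# Illustrative sketch (hypothetical data): deep_get walks a dotted path,
# filling in empty dicts for missing keys along the way, e.g.
#
#   metadata = {'q1': {'value': {'uploader': {'extra': []}}}}
#   deep_get(metadata, 'q1.value.uploader')  # -> {'extra': []}
#
# Because missing segments are created as {}, migrate_file_metadata below can
# assign into the returned mapping without checking for KeyError first.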
def migrate_file_metadata(dst, schema):
metadata = dst.registered_meta[schema._id]
missing_files = []
selected_files = find_selected_files(schema, metadata)
for path, selected in selected_files.items():
for registration_file, node_id, index in find_registration_files(selected, dst):
if not registration_file:
missing_files.append({
'file_name': selected['extra'][index]['selectedFileName'],
'question_title': find_question(schema.schema, path[0])['title']
})
continue
target = deep_get(metadata, path)
target['extra'][index]['viewUrl'] = VIEW_FILE_URL_TEMPLATE.format(node_id=node_id, path=registration_file['path'].lstrip('/'))
if missing_files:
from website.archiver.tasks import ArchivedFileNotFound
raise ArchivedFileNotFound(
registration=dst,
missing_files=missing_files
)
dst.registered_meta[schema._id] = metadata
dst.save()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GridRNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class GridRNNCellTest(tf.test.TestCase):
def testGrid2BasicLSTMCell(self):
with self.test_session() as sess:
with tf.variable_scope(
'root', initializer=tf.constant_initializer(0.2)) as root_scope:
x = tf.zeros([1, 3])
m = tf.zeros([1, 8])
cell = tf.contrib.grid_rnn.Grid2BasicLSTMCell(2)
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([tf.initialize_all_variables()])
res = sess.run(
[g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.36617181, 0.36617181]])
self.assertAllClose(res[1], [[0.71053141, 0.71053141, 0.36617181,
0.36617181, 0.72320831, 0.80555487,
0.39102408, 0.42150158]])
# emulate a loop through the input sequence,
# where we call cell() multiple times
root_scope.reuse_variables()
g2, s2 = cell(x, m)
self.assertEqual(g2.get_shape(), (1, 2))
self.assertEqual(s2.get_shape(), (1, 8))
res = sess.run([g2, s2], {x: np.array([[2., 2., 2.]]), m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.58847463, 0.58847463]])
self.assertAllClose(res[1], [[1.40469193, 1.40469193, 0.58847463,
0.58847463, 0.97726452, 1.04626071,
0.4927212, 0.51137757]])
def testGrid2BasicLSTMCellTied(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.2)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 8])
cell = tf.contrib.grid_rnn.Grid2BasicLSTMCell(2, tied=True)
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([tf.initialize_all_variables()])
res = sess.run(
[g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.36617181, 0.36617181]])
self.assertAllClose(res[1], [[0.71053141, 0.71053141, 0.36617181,
0.36617181, 0.72320831, 0.80555487,
0.39102408, 0.42150158]])
res = sess.run([g, s], {x: np.array([[1., 1., 1.]]), m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.36703536, 0.36703536]])
self.assertAllClose(res[1], [[0.71200621, 0.71200621, 0.36703536,
0.36703536, 0.80941606, 0.87550586,
0.40108523, 0.42199609]])
def testGrid2BasicLSTMCellWithRelu(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.2)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 4])
cell = tf.contrib.grid_rnn.Grid2BasicLSTMCell(
2, tied=False, non_recurrent_fn=tf.nn.relu)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.31667367, 0.31667367]])
self.assertAllClose(res[1],
[[0.29530135, 0.37520045, 0.17044567, 0.21292259]])
"""LSTMCell
"""
def testGrid2LSTMCell(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 8])
cell = tf.contrib.grid_rnn.Grid2LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([tf.initialize_all_variables()])
res = sess.run(
[g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
self.assertAllClose(res[1], [[2.41515064, 2.41515064, 0.95686918,
0.95686918, 1.38917875, 1.49043763,
0.83884692, 0.86036491]])
def testGrid2LSTMCellTied(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 8])
cell = tf.contrib.grid_rnn.Grid2LSTMCell(
2, tied=True, use_peepholes=True)
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([tf.initialize_all_variables()])
res = sess.run(
[g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
self.assertAllClose(res[1], [[2.41515064, 2.41515064, 0.95686918,
0.95686918, 1.38917875, 1.49043763,
0.83884692, 0.86036491]])
def testGrid2LSTMCellWithRelu(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 4])
cell = tf.contrib.grid_rnn.Grid2LSTMCell(
2, use_peepholes=True, non_recurrent_fn=tf.nn.relu)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[2.1831727, 2.1831727]])
self.assertAllClose(res[1],
[[0.92270052, 1.02325559, 0.66159075, 0.70475441]])
"""RNNCell
"""
def testGrid2BasicRNNCell(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([2, 2])
m = tf.zeros([2, 4])
cell = tf.contrib.grid_rnn.Grid2BasicRNNCell(2)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (2, 2))
self.assertEqual(s.get_shape(), (2, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run(
[g, s], {x: np.array([[1., 1.], [2., 2.]]),
m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])})
self.assertEqual(res[0].shape, (2, 2))
self.assertEqual(res[1].shape, (2, 4))
self.assertAllClose(res[0], [[0.94685763, 0.94685763],
[0.99480951, 0.99480951]])
self.assertAllClose(res[1],
[[0.94685763, 0.94685763, 0.80049908, 0.80049908],
[0.99480951, 0.99480951, 0.97574311, 0.97574311]])
def testGrid2BasicRNNCellTied(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([2, 2])
m = tf.zeros([2, 4])
cell = tf.contrib.grid_rnn.Grid2BasicRNNCell(2, tied=True)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (2, 2))
self.assertEqual(s.get_shape(), (2, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run(
[g, s], {x: np.array([[1., 1.], [2., 2.]]),
m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])})
self.assertEqual(res[0].shape, (2, 2))
self.assertEqual(res[1].shape, (2, 4))
self.assertAllClose(res[0], [[0.94685763, 0.94685763],
[0.99480951, 0.99480951]])
self.assertAllClose(res[1],
[[0.94685763, 0.94685763, 0.80049908, 0.80049908],
[0.99480951, 0.99480951, 0.97574311, 0.97574311]])
def testGrid2BasicRNNCellWithRelu(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 2])
cell = tf.contrib.grid_rnn.Grid2BasicRNNCell(
2, non_recurrent_fn=tf.nn.relu)
self.assertEqual(cell.state_size, 2)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 2))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s], {x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 2))
self.assertAllClose(res[0], [[1.80049896, 1.80049896]])
self.assertAllClose(res[1], [[0.80049896, 0.80049896]])
"""1-LSTM
"""
def testGrid1LSTMCell(self):
with self.test_session() as sess:
with tf.variable_scope(
'root', initializer=tf.constant_initializer(0.5)) as root_scope:
x = tf.zeros([1, 3])
m = tf.zeros([1, 4])
cell = tf.contrib.grid_rnn.Grid1LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.91287315, 0.91287315]])
self.assertAllClose(res[1],
[[2.26285243, 2.26285243, 0.91287315, 0.91287315]])
root_scope.reuse_variables()
x2 = tf.zeros([0, 0])
g2, s2 = cell(x2, m)
self.assertEqual(g2.get_shape(), (1, 2))
self.assertEqual(s2.get_shape(), (1, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run([g2, s2], {m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.9032144, 0.9032144]])
self.assertAllClose(res[1],
[[2.79966092, 2.79966092, 0.9032144, 0.9032144]])
g3, s3 = cell(x2, m)
self.assertEqual(g3.get_shape(), (1, 2))
self.assertEqual(s3.get_shape(), (1, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run([g3, s3], {m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.92727238, 0.92727238]])
self.assertAllClose(res[1],
[[3.3529923, 3.3529923, 0.92727238, 0.92727238]])
"""3-LSTM
"""
def testGrid3LSTMCell(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 12])
cell = tf.contrib.grid_rnn.Grid3LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, 12)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 12))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,
0.8, -0.1, -0.2, -0.3, -0.4]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 12))
self.assertAllClose(res[0], [[0.96892911, 0.96892911]])
self.assertAllClose(res[1], [[2.45227885, 2.45227885, 0.96892911,
0.96892911, 1.33592629, 1.4373529,
0.80867189, 0.83247656, 0.7317788,
0.63205892, 0.56548983, 0.50446129]])
"""Edge cases
"""
def testGridRNNEdgeCasesLikeRelu(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([3, 2])
m = tf.zeros([0, 0])
# this is equivalent to relu
cell = tf.contrib.grid_rnn.GridRNNCell(
num_units=2,
num_dims=1,
input_dims=0,
output_dims=0,
non_recurrent_dims=0,
non_recurrent_fn=tf.nn.relu)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (3, 2))
self.assertEqual(s.get_shape(), (0, 0))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s], {x: np.array([[1., -1.], [-2, 1], [2, -1]])})
self.assertEqual(res[0].shape, (3, 2))
self.assertEqual(res[1].shape, (0, 0))
self.assertAllClose(res[0], [[0, 0], [0, 0], [0.5, 0.5]])
def testGridRNNEdgeCasesNoOutput(self):
with self.test_session() as sess:
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 4])
# This cell produces no output
cell = tf.contrib.grid_rnn.GridRNNCell(
num_units=2,
num_dims=2,
input_dims=0,
output_dims=None,
non_recurrent_dims=0,
non_recurrent_fn=tf.nn.relu)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (0, 0))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s], {x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1, 0.1, 0.1]])})
self.assertEqual(res[0].shape, (0, 0))
self.assertEqual(res[1].shape, (1, 4))
"""Test with tf.nn.rnn
"""
def testGrid2LSTMCellWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.grid_rnn.Grid2LSTMCell(num_units=num_units)
inputs = max_length * [
tf.placeholder(
tf.float32, shape=(batch_size, input_size))
]
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 8))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
self.assertEqual(out.get_shape()[1], num_units)
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid2LSTMCellReLUWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.grid_rnn.Grid2LSTMCell(
num_units=num_units, non_recurrent_fn=tf.nn.relu)
inputs = max_length * [
tf.placeholder(
tf.float32, shape=(batch_size, input_size))
]
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 4))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
self.assertEqual(out.get_shape()[1], num_units)
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid3LSTMCellReLUWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.grid_rnn.Grid3LSTMCell(
num_units=num_units, non_recurrent_fn=tf.nn.relu)
inputs = max_length * [
tf.placeholder(
tf.float32, shape=(batch_size, input_size))
]
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 8))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
self.assertEqual(out.get_shape()[1], num_units)
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid1LSTMCellWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.grid_rnn.Grid1LSTMCell(num_units=num_units)
# for 1-LSTM, we only feed the first step
inputs = ([tf.placeholder(tf.float32, shape=(batch_size, input_size))]
+ (max_length - 1) * [tf.zeros([batch_size, input_size])])
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 4))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape(), (3, num_units))
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _convert_external_state_policy_to_enum(external_state_policy):
if isinstance(external_state_policy, distribute_options.ExternalStatePolicy):
return external_state_policy
if external_state_policy == "warn":
return distribute_options.ExternalStatePolicy.WARN
if external_state_policy == "ignore":
return distribute_options.ExternalStatePolicy.IGNORE
if external_state_policy == "fail":
return distribute_options.ExternalStatePolicy.FAIL
  raise ValueError(
      "Failed to convert {} to an instance of ExternalStatePolicy. "
      "Supported values include: 'warn', 'ignore' and 'fail'.".format(
          external_state_policy))
@tf_export("data.experimental.make_saveable_from_iterator")
@deprecation.deprecated(
None, "`make_saveable_from_iterator` is intended for use in TF1 with "
"`tf.compat.v1.Saver`. In TF2, use `tf.train.Checkpoint` instead.")
def make_saveable_from_iterator(iterator, external_state_policy=None):
"""Returns a SaveableObject for saving/restoring iterator state using Saver.
Args:
iterator: Iterator.
external_state_policy: A string that identifies how to handle input
pipelines that depend on external state. Possible values are
'ignore': The external state is silently ignored.
'warn': The external state is ignored, logging a warning.
'fail': The operation fails upon encountering external state.
By default we set it to 'fail'.
Returns:
A SaveableObject for saving/restoring iterator state using Saver.
Raises:
ValueError: If iterator does not support checkpointing.
ValueError: If `external_state_policy` is not one of 'warn', 'ignore' or
'fail'.
For example:
```python
with tf.Graph().as_default():
ds = tf.data.Dataset.range(10)
iterator = ds.make_initializable_iterator()
# Build the iterator SaveableObject.
saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)
# Add the SaveableObject to the SAVEABLE_OBJECTS collection so
# it can be automatically saved using Saver.
tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.compat.v1.train.Saver()
while continue_training:
... Perform training ...
if should_save_checkpoint:
saver.save()
```
Note: When restoring the iterator, the existing iterator state is completely
discarded. This means that any changes you may have made to the Dataset
graph will be discarded as well! This includes the new Dataset graph
that you may have built during validation. So, while running validation,
make sure to run the initializer for the validation input pipeline after
restoring the checkpoint.
Note: Not all iterators support checkpointing yet. Attempting to save the
state of an unsupported iterator will throw an error.
"""
if external_state_policy is None:
external_state_policy = "fail"
policy_enum = _convert_external_state_policy_to_enum(external_state_policy)
return iterator_ops._IteratorSaveable( # pylint: disable=protected-access
iterator._iterator_resource, # pylint: disable=protected-access
iterator._iterator_resource.name, # pylint: disable=protected-access
external_state_policy=policy_enum)
@tf_export("data.experimental.CheckpointInputPipelineHook")
class CheckpointInputPipelineHook(session_run_hook.SessionRunHook):
"""Checkpoints input pipeline state every N steps or seconds.
This hook saves the state of the iterators in the `Graph` so that when
training is resumed the input pipeline continues from where it left off.
This could potentially avoid overfitting in certain pipelines where the
  number of training steps per eval is small compared to the dataset
  size, or if the training pipeline is pre-empted.
Differences from `CheckpointSaverHook`:
1. Saves only the input pipelines in the "iterators" collection and not the
global variables or other saveable objects.
2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.
Example of checkpointing the training pipeline:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
This hook should be used if the input pipeline state needs to be saved
separate from the model checkpoint. Doing so may be useful for a few reasons:
1. The input pipeline checkpoint may be large, if there are large shuffle
or prefetch buffers for instance, and may bloat the checkpoint size.
2. If the input pipeline is shared between training and validation, restoring
the checkpoint during validation may override the validation input
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
`tf.data.experimental.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
  collection when building the eval graph.
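  A minimal sketch of that approach, mirroring the example in
  `make_saveable_from_iterator` above (training loop and checkpoint paths
  omitted):
  ```python
  ds = tf.data.Dataset.range(10)
  iterator = ds.make_initializable_iterator()
  saveable = tf.data.experimental.make_saveable_from_iterator(iterator)
  # Adding the saveable to the collection makes the model Saver also
  # save/restore the iterator state; skip this step in the eval graph.
  tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable)
  saver = tf.compat.v1.train.Saver()
  ```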
"""
def __init__(self, estimator, external_state_policy=None):
"""Initializes a `CheckpointInputPipelineHook`.
If the input pipeline depends on external state (e.g. seeds for
RandomUniform) beyond the input pipeline, this hook would be unable to
    serialize and deserialize that state. If it's acceptable to ignore that
    state, change the `external_state_policy` argument to 'warn' or 'ignore'.
    For example:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.data.experimental.CheckpointInputPipelineHook(
est, external_state_policy='warn')],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
Args:
estimator: Estimator.
external_state_policy: A string that identifies how to handle input
pipelines that depend on external state. Possible values are
'ignore': The external state is silently ignored.
'warn': The external state is ignored, logging a warning.
'fail': The operation fails upon encountering external state.
By default we set it to 'fail'.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of saver or scaffold should be set.
ValueError: If `external_state_policy` is not one of 'warn', 'ignore' or
'fail'.
"""
if external_state_policy is None:
external_state_policy = "fail"
self._external_state_policy = _convert_external_state_policy_to_enum(
external_state_policy)
# `checkpoint_basename` is "input.ckpt" for non-distributed pipelines or
# of the form "input_<task_type>_<task_id>.ckpt" for distributed pipelines.
# Note: The default `checkpoint_basename` used by `CheckpointSaverHook` is
# "model.ckpt". We intentionally choose the input pipeline checkpoint prefix
# to be different to avoid conflicts with the model checkpoint.
# pylint: disable=protected-access
checkpoint_prefix = "input"
if estimator._config.num_worker_replicas > 1:
# Distributed setting.
suffix = "_{}_{}".format(estimator._config.task_type,
estimator._config.task_id)
checkpoint_prefix += suffix
# pylint: enable=protected-access
# We use a composition paradigm instead of inheriting from
# `CheckpointSaverHook` because `Estimator` does an `isinstance` check
# to check whether a `CheckpointSaverHook` is already present in the list
# of hooks and if not, adds one. Inheriting from `CheckpointSaverHook`
# would thwart this behavior. This hook checkpoints *only the iterators*
# and not the graph variables.
self._checkpoint_saver_hook = basic_session_run_hooks.CheckpointSaverHook(
estimator.model_dir,
save_secs=estimator._config.save_checkpoints_secs, # pylint: disable=protected-access
save_steps=estimator._config.save_checkpoints_steps, # pylint: disable=protected-access
checkpoint_basename=checkpoint_prefix + ".ckpt")
# Name for the protocol buffer file that will contain the list of most
# recent checkpoints stored as a `CheckpointState` protocol buffer.
# This file, kept in the same directory as the checkpoint files, is
# automatically managed by the `Saver` to keep track of recent checkpoints.
# The default name used by the `Saver` for this file is "checkpoint". Here
# we use the name "checkpoint_<checkpoint_prefix>" so that in case the
# `checkpoint_dir` is the same as the model checkpoint directory, there are
# no conflicts during restore.
self._latest_filename = "checkpoint_" + checkpoint_prefix
def begin(self):
# Build a Saver that saves all iterators in the `GLOBAL_ITERATORS`
# collection if no `Saver` or `Scaffold` is provided.
# pylint: disable=protected-access
if (self._checkpoint_saver_hook._saver is None and
self._checkpoint_saver_hook._scaffold is None):
iterators = ops.get_collection(iterator_ops.GLOBAL_ITERATORS)
saveables = [
iterator_ops._IteratorSaveable(
i, i.name, external_state_policy=self._external_state_policy)
for i in iterators
]
self._checkpoint_saver_hook._saver = _CustomSaver(
saveables, self._latest_filename, sharded=True)
# pylint: enable=protected-access
self._checkpoint_saver_hook.begin()
def after_create_session(self, session, coord):
# If a new session was created, we set _first_run to True so that we can
# restore if needed.
self._first_run = True
def _restore_or_save_initial_ckpt(self, session):
# Ideally this should be run in after_create_session but is not for the
# following reason:
# Currently there is no way of enforcing an order of running the
# `SessionRunHooks`. Hence it is possible that the `_DatasetInitializerHook`
# is run *after* this hook. That is troublesome because
# 1. If a checkpoint exists and this hook restores it, the initializer hook
# will override it.
# 2. If no checkpoint exists, this hook will try to save an uninitialized
# iterator which will result in an exception.
#
# As a temporary fix we enter the following implicit contract between this
# hook and the _DatasetInitializerHook.
# 1. The _DatasetInitializerHook initializes the iterator in the call to
# after_create_session.
# 2. This hook saves the iterator on the first call to `before_run()`, which
# is guaranteed to happen after `after_create_session()` of all hooks
# have been run.
# Check if there is an existing checkpoint. If so, restore from it.
# pylint: disable=protected-access
latest_checkpoint_path = checkpoint_management.latest_checkpoint(
self._checkpoint_saver_hook._checkpoint_dir,
latest_filename=self._latest_filename)
if latest_checkpoint_path:
self._checkpoint_saver_hook._get_saver().restore(session,
latest_checkpoint_path)
else:
# The checkpoint saved here is the state at step "global_step".
# Note: We do not save the GraphDef or MetaGraphDef here.
global_step = session.run(self._checkpoint_saver_hook._global_step_tensor)
self._checkpoint_saver_hook._save(session, global_step)
self._checkpoint_saver_hook._timer.update_last_triggered_step(global_step)
# pylint: enable=protected-access
def before_run(self, run_context):
if self._first_run:
self._restore_or_save_initial_ckpt(run_context.session)
self._first_run = False
return self._checkpoint_saver_hook.before_run(run_context)
def after_run(self, run_context, run_values):
self._checkpoint_saver_hook.after_run(run_context, run_values)
def end(self, session):
self._checkpoint_saver_hook.end(session)
class _CustomSaver(saver_lib.Saver):
"""`Saver` with a different default `latest_filename`.
This is used in the `CheckpointInputPipelineHook` to avoid conflicts with
the model ckpt saved by the `CheckpointSaverHook`.
"""
def __init__(self, var_list, latest_filename, sharded=False):
super(_CustomSaver, self).__init__(var_list, sharded=sharded)
self._latest_filename = latest_filename
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False):
return super(_CustomSaver, self).save(
sess, save_path, global_step, latest_filename or self._latest_filename,
meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)
|
|
"""
Container for functions that work with HDF5 genotype/phenotype datasets.
(Usually useful for analysing human data.)
"""
import h5py
import scipy as sp
import sys
import linear_models as lm
import analyze_gwas_results as agr
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def calculate_ibd_kinship(hdf5_filename='/home/bv25/data/Ls154/Ls154_12.hdf5',
chunk_size=1000, overwrite=False):
"""
Calculates a kinship matrix and stores it in the HDF5 file.
"""
h5f = h5py.File(hdf5_filename)
n_indivs = len(h5f['indiv_data']['indiv_ids'][...])
if overwrite or not 'kinship' in h5f.keys():
if 'kinship' in h5f.keys():
print 'Overwriting kinship.'
del h5f['kinship']
print 'Calculating kinship.'
k_mat = sp.zeros((n_indivs, n_indivs), dtype='single')
gg = h5f['genot_data']
chromosomes = gg.keys()
n_snps = 0
for chrom in chromosomes:
print 'Working on Chromosome %s' % chrom
cg = gg[chrom]
num_snps = len(cg['raw_snps'])
normalized = 'snps' in cg.keys()
for chunk_i, i in enumerate(range(0, num_snps, chunk_size)):
# if chunk_i % 2 != 0:
# continue
end_i = min(i + chunk_size, num_snps)
if normalized:
x = cg['snps'][i:end_i]
else:
x = cg['raw_snps'][i:end_i]
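                # Standardise each SNP (zero mean, unit variance across
                # individuals) before accumulating X'X into the kinship matrix.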
x = x.T
x = (x - sp.mean(x, 0)) / sp.std(x, 0)
x = x.T
n_snps += len(x)
k_mat += sp.dot(x.T, x)
del x
sys.stdout.write('\b\b\b\b\b\b\b%0.2f%%' % (100.0 * (min(1, ((chunk_i + 1.0) * chunk_size) / num_snps))))
sys.stdout.flush()
sys.stdout.write('\b\b\b\b\b\b\b100.00%\n')
k_mat = k_mat / float(n_snps)
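        # Gower-style rescaling: c equals trace(P K P) with P = I - J/n, so
        # multiplying by (n - 1) / c gives the centred kinship a trace of n - 1
        # (an average diagonal of roughly one).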
c = sp.sum((sp.eye(len(k_mat)) - (1.0 / len(k_mat)) * sp.ones(k_mat.shape)) * sp.array(k_mat))
scalar = (len(k_mat) - 1) / c
print 'Kinship scaled by: %0.4f' % scalar
k = scalar * k_mat
h5f.create_dataset('kinship', data=k)
else:
print 'kinship already there.'
def run_emmax(hdf5_filename='/home/bv25/data/Ls154/Ls154_12.hdf5',
out_file='/home/bv25/data/Ls154/Ls154_results.hdf5',
min_maf=0.1, recalculate_kinship=True, chunk_size=1000):
"""
    Apply the EMMAX algorithm to HDF5-formatted genotype/phenotype data.
"""
ih5f = h5py.File(hdf5_filename)
gg = ih5f['genot_data']
ig = ih5f['indiv_data']
n_indivs = len(ig['indiv_ids'][...])
if recalculate_kinship:
print 'Calculating kinship.'
k_mat = sp.zeros((n_indivs, n_indivs), dtype='single')
chromosomes = gg.keys()
n_snps = 0
for chrom in chromosomes:
print 'Working on Chromosome %s' % chrom
cg = gg[chrom]
freqs = cg['freqs'][...]
mafs = sp.minimum(freqs, 1 - freqs)
maf_filter = mafs > min_maf
print 'Filtered out %d SNPs with MAF<%0.2f.' % (len(maf_filter) - sum(maf_filter), min_maf)
snps = cg['raw_snps'][...]
snps = snps[maf_filter]
num_snps = len(snps)
for chunk_i, i in enumerate(range(0, num_snps, chunk_size)):
end_i = min(i + chunk_size, num_snps)
x = snps[i:end_i]
x = x.T
x = (x - sp.mean(x, 0)) / sp.std(x, 0)
x = x.T
n_snps += len(x)
k_mat += sp.dot(x.T, x)
del x
sys.stdout.write('\b\b\b\b\b\b\b%0.2f%%' % (100.0 * (min(1, ((chunk_i + 1.0) * chunk_size) / num_snps))))
sys.stdout.flush()
sys.stdout.write('\b\b\b\b\b\b\b100.00%\n')
k_mat = k_mat / float(n_snps)
c = sp.sum((sp.eye(len(k_mat)) - (1.0 / len(k_mat)) * sp.ones(k_mat.shape)) * sp.array(k_mat))
scalar = (len(k_mat) - 1) / c
print 'Kinship scaled by: %0.4f' % scalar
k = scalar * k_mat
else:
assert 'kinship' in ih5f.keys(), 'Kinship is missing. Please calculate that first!'
k = ih5f['kinship']
# Get the phenotypes
phenotypes = ig['phenotypes'][...]
# Initialize the mixed model
lmm = lm.LinearMixedModel(phenotypes)
lmm.add_random_effect(k)
# Calculate pseudo-heritability, etc.
print 'Calculating the eigenvalues of K'
s0 = time.time()
eig_L = lmm._get_eigen_L_()
print 'Done.'
print 'Took %0.2f seconds' % (time.time() - s0)
print "Calculating the eigenvalues of S(K+I)S where S = I-X(X'X)^-1X'"
s0 = time.time()
eig_R = lmm._get_eigen_R_(X=lmm.X)
print 'Done'
print 'Took %0.2f seconds' % (time.time() - s0)
print 'Getting variance estimates'
s0 = time.time()
res = lmm.get_estimates(eig_L, method='REML', eig_R=eig_R) # Get the variance estimates..
print 'Done.'
print 'Took %0.2f seconds' % (time.time() - s0)
print 'pseudo_heritability:', res['pseudo_heritability']
# Initialize results file
oh5f = h5py.File(out_file)
# Store phenotype_data
oh5f.create_dataset('pseudo_heritability', data=sp.array(res['pseudo_heritability']))
oh5f.create_dataset('ve', data=sp.array(res['ve']))
oh5f.create_dataset('vg', data=sp.array(res['vg']))
oh5f.create_dataset('max_ll', data=sp.array(res['max_ll']))
oh5f.create_dataset('num_snps', data=ih5f['num_snps'])
# Construct results data containers
chrom_res_group = oh5f.create_group('chrom_results')
for chrom in gg.keys():
crg = chrom_res_group.create_group(chrom)
# Get the SNPs
print 'Working on Chromosome: %s' % chrom
freqs = gg[chrom]['freqs'][...]
mafs = sp.minimum(freqs, 1 - freqs)
maf_filter = mafs > min_maf
print 'Filtered out %d SNPs with MAF<%0.2f.' % (len(maf_filter) - sum(maf_filter), min_maf)
snps = gg[chrom]['raw_snps'][...]
snps = snps[maf_filter]
positions = gg[chrom]['positions'][...]
positions = positions[maf_filter]
# Now run EMMAX
print "Running EMMAX"
s1 = time.time()
r = lmm._emmax_f_test_(snps, res['H_sqrt_inv'], with_betas=False, emma_num=0, eig_L=eig_L)
secs = time.time() - s1
if secs > 60:
mins = int(secs) / 60
secs = secs % 60
print 'Took %d mins and %0.1f seconds.' % (mins, secs)
else:
print 'Took %0.1f seconds.' % (secs)
crg.create_dataset('ps', data=r['ps'])
crg.create_dataset('positions', data=positions)
oh5f.flush()
ih5f.close()
oh5f.close()
def run_emmax_perm(hdf5_filename='/home/bv25/data/Ls154/Ls154_12.hdf5',
out_file='/home/bv25/data/Ls154/Ls154_results_perm.hdf5',
min_maf=0.1, recalculate_kinship=True, chunk_size=1000,
num_perm=500):
"""
    Apply the EMMAX algorithm to HDF5-formatted genotype/phenotype data.
"""
ih5f = h5py.File(hdf5_filename)
gg = ih5f['genot_data']
ig = ih5f['indiv_data']
n_indivs = len(ig['indiv_ids'][...])
print 'Calculating kinship.'
k_mat = sp.zeros((n_indivs, n_indivs), dtype='single')
chromosomes = gg.keys()
# chromosomes = chromosomes[-1:]
n_snps = 0
for chrom in chromosomes:
print 'Working on Chromosome %s' % chrom
cg = gg[chrom]
freqs = cg['freqs'][...]
mafs = sp.minimum(freqs, 1 - freqs)
maf_filter = mafs > min_maf
print 'Filtered out %d SNPs with MAF<%0.2f.' % (len(maf_filter) - sum(maf_filter), min_maf)
snps = cg['raw_snps'][...]
snps = snps[maf_filter]
num_snps = len(snps)
for chunk_i, i in enumerate(range(0, num_snps, chunk_size)):
end_i = min(i + chunk_size, num_snps)
x = snps[i:end_i]
x = x.T
x = (x - sp.mean(x, 0)) / sp.std(x, 0)
x = x.T
n_snps += len(x)
k_mat += sp.dot(x.T, x)
del x
sys.stdout.write('\b\b\b\b\b\b\b%0.2f%%' % (100.0 * (min(1, ((chunk_i + 1.0) * chunk_size) / num_snps))))
sys.stdout.flush()
sys.stdout.write('\b\b\b\b\b\b\b100.00%\n')
k_mat = k_mat / float(n_snps)
c = sp.sum((sp.eye(len(k_mat)) - (1.0 / len(k_mat)) * sp.ones(k_mat.shape)) * sp.array(k_mat))
scalar = (len(k_mat) - 1) / c
print 'Kinship scaled by: %0.4f' % scalar
k = scalar * k_mat
# Store the kinship
# Initialize results file
oh5f = h5py.File(out_file)
oh5f.create_dataset('kinship', data=k)
oh5f.flush()
chromosomes = gg.keys()
num_tot_snps = 0
num_12_chr_snps = 0
for chrom in chromosomes:
cg = gg[chrom]
freqs = cg['freqs'][...]
mafs = sp.minimum(freqs, 1 - freqs)
maf_filter = mafs > min_maf
n_snps = sum(maf_filter)
num_tot_snps += n_snps
if chrom != chromosomes[-1]:
num_12_chr_snps += n_snps
# Get the phenotypes
phenotypes = ig['phenotypes'][...]
# Initialize the mixed model
lmm = lm.LinearMixedModel(phenotypes)
lmm.add_random_effect(k)
# Calculate pseudo-heritability, etc.
print 'Calculating the eigenvalues of K'
s0 = time.time()
eig_L = lmm._get_eigen_L_()
print 'Done.'
print 'Took %0.2f seconds' % (time.time() - s0)
print "Calculating the eigenvalues of S(K+I)S where S = I-X(X'X)^-1X'"
s0 = time.time()
eig_R = lmm._get_eigen_R_(X=lmm.X)
print 'Done'
print 'Took %0.2f seconds' % (time.time() - s0)
print 'Getting variance estimates'
s0 = time.time()
res = lmm.get_estimates(eig_L, method='REML', eig_R=eig_R) # Get the variance estimates..
print 'Done.'
print 'Took %0.2f seconds' % (time.time() - s0)
print 'pseudo_heritability:', res['pseudo_heritability']
# Store phenotype_data
oh5f.create_dataset('pseudo_heritability', data=sp.array(res['pseudo_heritability']))
oh5f.create_dataset('ve', data=sp.array(res['ve']))
oh5f.create_dataset('vg', data=sp.array(res['vg']))
oh5f.create_dataset('max_ll', data=sp.array(res['max_ll']))
    oh5f.create_dataset('num_snps', data=sp.array(num_tot_snps))
# Construct results data containers
chrom_res_group = oh5f.create_group('chrom_results')
# all_snps = sp.empty((n_snps, n_indivs))
chr12_snps = sp.empty((num_12_chr_snps, n_indivs))
i = 0
for chrom in gg.keys():
crg = chrom_res_group.create_group(chrom)
# Get the SNPs
print 'Working on Chromosome: %s' % chrom
freqs = gg[chrom]['freqs'][...]
mafs = sp.minimum(freqs, 1 - freqs)
maf_filter = mafs > min_maf
print 'Filtered out %d SNPs with MAF<%0.2f.' % (len(maf_filter) - sum(maf_filter), min_maf)
snps = gg[chrom]['raw_snps'][...]
snps = snps[maf_filter]
positions = gg[chrom]['positions'][...]
positions = positions[maf_filter]
n = len(snps)
# all_snps[i:i + n] = snps
if chrom != chromosomes[-1]:
chr12_snps[i:i + n] = snps
# Now run EMMAX
print "Running EMMAX"
s1 = time.time()
r = lmm._emmax_f_test_(snps, res['H_sqrt_inv'], with_betas=False, emma_num=0, eig_L=eig_L)
secs = time.time() - s1
if secs > 60:
mins = int(secs) / 60
secs = secs % 60
print 'Took %d mins and %0.1f seconds.' % (mins, secs)
else:
print 'Took %0.1f seconds.' % (secs)
crg.create_dataset('ps', data=r['ps'])
crg.create_dataset('positions', data=positions)
oh5f.flush()
i += n
print 'Starting permutation test for detecting the genome-wide significance threshold'
s1 = time.time()
perm_res = lmm._emmax_permutations_(chr12_snps, k, res['H_sqrt_inv'], num_perm=num_perm)
secs = time.time() - s1
if secs > 60:
mins = int(secs) / 60
secs = secs % 60
print 'Took %d mins and %0.1f seconds.' % (mins, secs)
else:
print 'Took %0.1f seconds.' % (secs)
perm_res['min_ps'].sort()
perm_res['max_f_stats'].sort()
    perm_res['max_f_stats'] = perm_res['max_f_stats'][::-1]  # reverse so the largest F statistics come first
five_perc_i = int(num_perm / 20)
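    # min_ps is sorted ascending, so min_ps[five_perc_i] approximates the 5th percentile of the
    # permutation null of minimum p-values; max_f_stats was reversed to descending above, so the
    # same index picks the matching F-statistic threshold.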
print "The 0.05 genome-wide significance threshold is %0.4e, and the corresponding statistic is %0.4e." % (perm_res['min_ps'][five_perc_i], perm_res['max_f_stats'][five_perc_i])
oh5f.create_dataset('perm_min_ps', data=perm_res['min_ps'])
oh5f.create_dataset('perm_max_f_stats', data=perm_res['max_f_stats'])
oh5f.create_dataset('five_perc_perm_min_ps', data=perm_res['min_ps'][five_perc_i])
oh5f.create_dataset('five_perc_perm_max_f_stats', data=perm_res['max_f_stats'][five_perc_i])
ih5f.close()
oh5f.close()
def qq_plot(hdf5_results_file='/home/bv25/data/Ls154/Ls154_results.hdf5',
png_file_prefix='/home/bv25/data/Ls154/Ls154_results'):
"""
Plot QQ-plot for a single HDF5 result
"""
h5f = h5py.File(hdf5_results_file)
chrom_res_group = h5f['chrom_results']
pvals = [] # sp.empty(h5f['num_snps'][...])
i = 0
for chrom in chrom_res_group.keys():
if chrom !='chrom_5':
crg = chrom_res_group[chrom]
n = len(crg['ps'])
# pvals[i:i + n] = crg['ps'][...]
pvals.extend(crg['ps'][...].tolist())
i += n
pvals = sp.array(pvals)
quantiles = agr.get_quantiles(pvals)
log_quantiles = agr.get_log_quantiles(pvals, max_val=7)
qq_plot_png_filename = png_file_prefix + '_qq.png'
qq_log_plot_png_filename = png_file_prefix + '_qq_log.png'
agr.simple_qqplot([quantiles], png_file=qq_plot_png_filename)
agr.simple_log_qqplot([log_quantiles], png_file=qq_log_plot_png_filename, max_val=7)
def manhattan_plot(hdf5_results_file='/home/bv25/data/Ls154/Ls154_results_perm.hdf5',
png_file='/home/bv25/data/Ls154/Ls154_results_manhattan.png',
max_log_pval=None, filter_pval=0.10, ylab="$-$log$_{10}(p-$value$)$", plot_bonferroni=True,
b_threshold=None, markersize=3, chrom_col_map=None):
"""
Plot a Manhattan plot for a single HDF5 result
"""
chrom_res_dict = {}
h5f = h5py.File(hdf5_results_file)
chrom_res_group = h5f['chrom_results']
num_snps = 0
for chrom in chrom_res_group.keys():
crg = chrom_res_group[chrom]
ps = crg['ps'][...]
positions = crg['positions'][...]
num_snps += len(positions)
ps_filter = ps < filter_pval
chrom_end = positions[-1]
chrom_res_dict[chrom] = {'log_ps':-sp.log10(ps[ps_filter]), 'positions': positions[ps_filter], 'chrom_end':chrom_end}
chromosomes = chrom_res_dict.keys()
chromosomes.sort()
if not max_log_pval:
max_log_pvals = []
for chrom in chromosomes:
max_log_pvals.append(chrom_res_dict[chrom]['log_ps'].max())
max_log_pval = max(max_log_pvals)
offset = 0
tick_positions = []
tick_strings = []
plt.figure(figsize=(11, 3.2))
plt.axes([0.045, 0.15, 0.95, 0.71])
chr_offsets = []
for chrom in chromosomes:
chr_offsets.append(offset)
log_ps = chrom_res_dict[chrom]['log_ps']
positions = chrom_res_dict[chrom]['positions']
plot_positions = offset + positions
pval_truc_filter = log_ps > max_log_pval
if sum(pval_truc_filter) > 0:
            print '%d -log p-values were truncated at %0.1f.' % (sum(pval_truc_filter), max_log_pval)
log_ps[pval_truc_filter] = max_log_pval
if not chrom_col_map:
plt.plot(plot_positions, log_ps, ".", markersize=markersize, alpha=0.7, mew=0)
else:
color = chrom_col_map[chrom]
plt.plot(plot_positions, log_ps, ".", markersize=markersize, alpha=0.7, color=color, mew=0)
chrom_end = chrom_res_dict[chrom]['chrom_end']
for j in range(offset, offset + chrom_end, 4000000):
tick_positions.append(j)
pos = (j - offset)
if pos % 8000000 == 0 and pos < chrom_end - 4000000 :
tick_strings.append(pos / 1000000)
else:
tick_strings.append('')
offset += chrom_end + 1 # one Mb buffer
if plot_bonferroni:
if not b_threshold:
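            # 0.05 Bonferroni threshold: -log10(0.05 / num_snps), written here as -log10(1 / (20 * num_snps)).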
b_threshold = -sp.log10(1.0 / (num_snps * 20.0))
        plt.plot([0, offset], [b_threshold, b_threshold], color='g', linestyle=":", alpha=0.6, label='Bonferroni threshold')
if 'five_perc_perm_min_ps' in h5f.keys():
perm_min_ps = h5f['five_perc_perm_min_ps'][...]
perm_log_thres = -sp.log10(perm_min_ps)
plt.plot([0, offset], [perm_log_thres, perm_log_thres], color='b', linestyle="--", alpha=0.6, label='Permutation threshold')
plt.legend()
max_y = max(b_threshold, perm_log_thres, max_log_pval)
x_range = offset
plt.axis([-x_range * 0.01, x_range * 1.01, -0.05 * max_y, 1.2 * max_y])
else:
max_y = max(b_threshold, max_log_pval)
x_range = offset
plt.axis([-x_range * 0.01, x_range * 1.01, -0.05 * max_y, 1.05 * max_y])
h5f.close()
plt.xticks(tick_positions, tick_strings, fontsize='x-small')
    plt.ylabel(ylab)
# plt.xlabel("Chromosome")
# else:
plt.xlabel("Mb")
if png_file:
plt.savefig(png_file, format="png", dpi=300, bbox_inches='tight')
plt.clf()
plt.close()
def parse_cegs_drosophila_phenotypes(phenotype_file='/Users/bjarnivilhjalmsson/data/cegs_lehmann/allphenotypes_5.0_cleaned.tab.reps.hdf5',):
"""
Parser for CEGS Drosophila phenotype data
"""
import pylab
#Load phenotypes...
ph5f = h5py.File(phenotype_file)
#Now take the median and mean of all values for all individuals.
phen_dict = {}
for phen in ph5f.keys():
#First mated
Y_mated = ph5f[phen]['Y_mated'][...]
Z_mated = ph5f[phen]['Z_mated'][...]
sample_filter = sp.negative(sp.isnan(Y_mated))
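        # Z_mated is a replicate-by-line indicator matrix, so Y.Z sums the phenotype per line and
        # 1.Z counts the replicates per line; their ratio below gives per-line means.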
Ys_sum = sp.dot(Y_mated[sample_filter], Z_mated[sample_filter])
rep_count = sp.dot(sp.ones(sum(sample_filter)), Z_mated[sample_filter])
Y_means = Ys_sum/rep_count
#Now calculate medians by iteration.
phen_vals_list = [[] for i in range(216)]
for i in range(len(Y_mated)):
ind_i = sp.where(1==Z_mated[i])[0][0]
phen_vals_list[ind_i].append(Y_mated[i])
medians = sp.zeros(216)
for i, pl in enumerate(phen_vals_list):
if len(pl)>0:
medians[i] = sp.median(pl)
else:
medians[i] = sp.nan
ind_filter = sp.negative(sp.isnan(Y_means))
if phen=='Triglyceride':
ind_filter = (Y_means>0)*ind_filter
phen_dict[phen]={'mated':{'Y_means':Y_means, 'rep_count':rep_count, 'ind_filter':ind_filter, 'Y_medians':medians}}
print 'Plotting phenotype histograms for %s, %s'%(phen,'mated')
mated_filtered_means = Y_means[ind_filter]
pylab.hist(mated_filtered_means)
pylab.savefig('/Users/bjarnivilhjalmsson/data/tmp/cegs_hist_%s_mated_means.png' % (phen))
pylab.clf()
mated_filtered_medians = medians[ind_filter]
pylab.hist(mated_filtered_medians)
pylab.savefig('/Users/bjarnivilhjalmsson/data/tmp/cegs_hist_%s_mated_medians.png' % (phen))
pylab.clf()
#Then virgin
Y_virgin = ph5f[phen]['Y_virgin'][...]
Z_virgin = ph5f[phen]['Z_virgin'][...]
sample_filter = sp.negative(sp.isnan(Y_virgin))
Ys_sum = sp.dot(Y_virgin[sample_filter], Z_virgin[sample_filter])
rep_count = sp.dot(sp.ones(sum(sample_filter)), Z_virgin[sample_filter])
Y_means = Ys_sum/rep_count
#Now calculate medians by iteration.
phen_vals_list = [[] for i in range(216)]
for i in range(len(Y_virgin)):
ind_i = sp.where(1==Z_virgin[i])[0][0]
phen_vals_list[ind_i].append(Y_virgin[i])
medians = sp.zeros(216)
for i, pl in enumerate(phen_vals_list):
if len(pl)>0:
medians[i] = sp.median(pl)
else:
medians[i] = sp.nan
ind_filter = sp.negative(sp.isnan(Y_means))
if phen=='Triglyceride':
ind_filter = (Y_means>0)*ind_filter
phen_dict[phen]['virgin']={'Y_means':Y_means, 'rep_count':rep_count, 'ind_filter':ind_filter, 'Y_medians':medians}
print 'Plotting phenotype histograms for %s, %s'%(phen,'virgin')
virgin_filtered_means = Y_means[ind_filter]
pylab.hist(virgin_filtered_means)
pylab.savefig('/Users/bjarnivilhjalmsson/data/tmp/cegs_hist_%s_virgin_means.png' % (phen))
pylab.clf()
virgin_filtered_medians = medians[ind_filter]
pylab.hist(virgin_filtered_medians)
pylab.savefig('/Users/bjarnivilhjalmsson/data/tmp/cegs_hist_%s_virgin_medians.png' % (phen))
pylab.clf()
means_corr = sp.corrcoef(mated_filtered_means, virgin_filtered_means)[0,1]
medians_corr = sp.corrcoef(mated_filtered_medians, virgin_filtered_medians)[0,1]
print 'Correlation between mated and virgin flies, means: %0.2f, medians: %0.2f'%(means_corr,medians_corr)
phen_dict[phen]['corrs'] = {'means':means_corr, 'medians':medians_corr}
return phen_dict
def coordinate_cegs_genotype_phenotype(phen_dict, phenotype='Protein',env='mated',k_thres=0.8, ind_missing_thres=0.5, snp_missing_thres=0.05, maf_thres=0.1,
genotype_file='/Users/bjarnivilhjalmsson/data/cegs_lehmann/CEGS.216.lines.NO_DPGP4.GATK.SNP.HETS.FILTERED.Filter_imputed.hdf5'):
"""
Parse genotypes and coordinate with phenotype, and ready data for analysis.
"""
gh5f = h5py.File(genotype_file)
p_dict = phen_dict[phenotype][env]
print 'Loading SNPs'
snps = sp.array(gh5f['gt'][...],dtype='single')
snps = snps[:,p_dict['ind_filter']]
positions = gh5f['pos'][...]
m,n = snps.shape
print 'Loaded %d SNPs for %d individuals'%(m,n)
print 'Filtering individuals with missing rates >%0.2f'%ind_missing_thres
missing_mat = sp.isnan(snps)
ind_missing_rates = sp.sum(missing_mat,0)/float(m)
ind_filter = ind_missing_rates<ind_missing_thres
snps = snps[:,ind_filter]
n = sp.sum(ind_filter)
print 'Filtered %d individuals due to high missing rates'%sp.sum(sp.negative(ind_filter))
gt_ids = gh5f['gt_ids'][p_dict['ind_filter']]
gt_ids = gt_ids[ind_filter]
Y_means = p_dict['Y_means'][p_dict['ind_filter']]
Y_means = Y_means[ind_filter]
Y_medians = p_dict['Y_medians'][p_dict['ind_filter']]
Y_medians = Y_medians[ind_filter]
rep_count = p_dict['rep_count'][p_dict['ind_filter']]
rep_count = rep_count[ind_filter]
print 'Now removing "bad" genotypes.'
bad_genotypes = ['Raleigh_272', 'Raleigh_378', 'Raleigh_554', 'Raleigh_591', 'Raleigh_398', 'Raleigh_138', 'Raleigh_208',
'Raleigh_336', 'Raleigh_370', 'Raleigh_373', 'Raleigh_374', 'Raleigh_799', 'Raleigh_821', 'Raleigh_822',
'Raleigh_884', 'Raleigh_335']
ind_filter = sp.negative(sp.in1d(gt_ids,bad_genotypes))
gt_ids = gt_ids[ind_filter]
Y_means= Y_means[ind_filter]
Y_medians= Y_medians[ind_filter]
rep_count= rep_count[ind_filter]
snps = snps[:,ind_filter]
print 'Removed %d "bad" genotypes'%sp.sum(sp.negative(ind_filter))
n = len(snps[0])
print 'Filtering SNPs with missing rate >%0.2f'%snp_missing_thres
missing_mat = sp.isnan(snps)
snp_missing_rates = sp.sum(missing_mat,1)/float(n)
snps_filter = snp_missing_rates<snp_missing_thres
snps = snps[snps_filter]
positions = positions[snps_filter]
m = sp.sum(snps_filter)
print 'Filtered %d SNPs due to high missing rate'%sp.sum(sp.negative(snps_filter))
print 'Now imputing (w mean)'
missing_mat = sp.isnan(snps)
ok_counts = n-sp.sum(missing_mat,1)
snps[missing_mat]=0
snp_means = sp.sum(snps,1)/ok_counts
# print snp_means.shape
# print snp_means[:10]
# import pdb
# pdb.set_trace()
for i in range(len(snps)):
snps[i,missing_mat[i]]=snp_means[i]
print 'And filtering SNPs with MAF<%0.2f'%maf_thres
snp_means = sp.mean(snps,1)
snp_mafs = sp.minimum(snp_means,1-snp_means)
snps_filter = snp_mafs>maf_thres
snps = snps[snps_filter]
positions = positions[snps_filter]
print 'Filtered %d SNPs with low MAFs'%sp.sum(sp.negative(snps_filter))
print 'Filtering based on kinship w threshold:',k_thres
import kinship
K = kinship.calc_ibd_kinship(snps)
print '\nKinship calculated'
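    # Greedy relatedness filter: drop individual i if it is closely related (K > k_thres) to any
    # later individual, so one member of each highly related pair is kept.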
K_ind_filter = []
for i in range(n):
K_ind_filter.append(not sp.any(K[i,i+1:n]>k_thres))
if sum(K_ind_filter)==n:
print 'No individuals were filtered based on kinship..'
else:
print 'Filtering %d individuals based on kinship.'%(n-sum(K_ind_filter))
K_ind_filter = sp.array(K_ind_filter)
gt_ids = gt_ids[K_ind_filter]
Y_means= Y_means[K_ind_filter]
Y_medians= Y_medians[K_ind_filter]
rep_count= rep_count[K_ind_filter]
snps = snps[:,K_ind_filter]
print 'Again filtering SNPs with MAF<%0.2f'%maf_thres
snp_means = sp.mean(snps,1)
snp_mafs = sp.minimum(snp_means,1-snp_means)
snps_filter = snp_mafs>maf_thres
snps = snps[snps_filter]
positions = positions[snps_filter]
print 'Filtered %d additional SNPs with low MAFs'%sp.sum(sp.negative(snps_filter))
print 'All filtering done.'
m,n = snps.shape
print 'In all there are %d SNPs remaining, for %d individuals.'%(m,n)
ret_dict = {'Y_means':Y_means, 'Y_medians':Y_medians, 'rep_count':rep_count, 'gt_ids':gt_ids,
'positions':positions, 'snps':snps}
return ret_dict
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import ctypes
import platform
import cgi
console_encoding = sys.getfilesystemencoding()
class print_style:
version = "1.0.2.0"
engine = None
theme = None
FC_BLACK = 0
FC_BLUE = 1
FC_GREEN = 2
FC_CYAN = 3
FC_RED = 4
FC_MAGENTA = 5
FC_YELLOW = 6
FC_WHITE = 7
BC_BLACK = 8
BC_BLUE = 9
BC_GREEN = 10
BC_CYAN = 11
BC_RED = 12
BC_MAGENTA = 13
BC_YELLOW = 14
BC_WHITE = 15
FW_BOLD = 16
def __contains__(self, value):
return False
"""
See https://msdn.microsoft.com/zh-cn/windows/apps/ms682088%28v=vs.100%29#_win32_character_attributes
for color codes
"""
class Win32ConsoleColor:
name = "windows console"
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN = 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN = 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
COLOR_MAP = {
print_style.FC_BLACK: FOREGROUND_BLACK,
print_style.FC_BLUE: FOREGROUND_BLUE | FOREGROUND_INTENSITY,
print_style.FC_GREEN: FOREGROUND_GREEN | FOREGROUND_INTENSITY,
print_style.FC_CYAN: FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY,
print_style.FC_RED: FOREGROUND_RED | FOREGROUND_INTENSITY,
print_style.FC_MAGENTA: FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_INTENSITY,
print_style.FC_YELLOW: FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_INTENSITY,
print_style.FC_WHITE: FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED,
print_style.BC_BLACK: FOREGROUND_BLACK,
print_style.BC_BLUE: BACKGROUND_BLUE,
print_style.BC_GREEN: BACKGROUND_GREEN,
print_style.BC_CYAN: BACKGROUND_BLUE | BACKGROUND_GREEN,
print_style.BC_RED: BACKGROUND_RED,
print_style.BC_MAGENTA: BACKGROUND_RED | BACKGROUND_BLUE,
print_style.BC_YELLOW: BACKGROUND_RED | BACKGROUND_GREEN,
print_style.BC_WHITE: BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE,
print_style.FW_BOLD: BACKGROUND_INTENSITY,
}
std_out_handle = None
std_err_handle = None
def get_cmd_color(self, handle=std_out_handle):
return (
Win32ConsoleColor.FOREGROUND_RED
| Win32ConsoleColor.FOREGROUND_GREEN
| Win32ConsoleColor.FOREGROUND_BLUE
)
def set_cmd_color(self, color, handle=std_out_handle):
"""(color) -> bit
Example: set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY)
"""
        ret = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
        return ret
def stdout_with_color(self, options, text):
style = Win32ConsoleColor.FOREGROUND_BLACK
for opt in options:
style = style | Win32ConsoleColor.COLOR_MAP[opt]
if style == Win32ConsoleColor.FOREGROUND_BLACK:
sys.stdout.write(text)
else:
old_style = self.get_cmd_color()
self.set_cmd_color(style, self.std_out_handle)
sys.stdout.write(text)
self.set_cmd_color(old_style, self.std_out_handle)
def stderr_with_color(self, options, text):
style = Win32ConsoleColor.FOREGROUND_BLACK
for opt in options:
style = style | Win32ConsoleColor.COLOR_MAP[opt]
if style == Win32ConsoleColor.FOREGROUND_BLACK:
sys.stderr.write(text)
else:
old_style = self.get_cmd_color()
self.set_cmd_color(style, self.std_err_handle)
sys.stderr.write(text)
self.set_cmd_color(old_style, self.std_err_handle)
class TermColor:
name = "terminal"
COLOR_MAP = {
print_style.FC_BLACK: "30",
print_style.FC_BLUE: "34",
print_style.FC_GREEN: "32",
print_style.FC_CYAN: "36",
print_style.FC_RED: "31",
print_style.FC_MAGENTA: "35",
print_style.FC_YELLOW: "33",
print_style.FC_WHITE: "37",
print_style.BC_BLACK: "40",
print_style.BC_BLUE: "44",
print_style.BC_GREEN: "42",
print_style.BC_CYAN: "46",
print_style.BC_RED: "41",
print_style.BC_MAGENTA: "45",
print_style.BC_YELLOW: "43",
print_style.BC_WHITE: "47",
print_style.FW_BOLD: "1",
}
def stdout_with_color(self, options, text):
style = []
for opt in options:
style.append(TermColor.COLOR_MAP[opt])
if len(style) > 0:
sys.stdout.write("\033[" + ";".join(style) + "m" + text + "\033[0m")
else:
sys.stdout.write(text)
def stderr_with_color(self, options, text):
style = []
for opt in options:
style.append(TermColor.COLOR_MAP[opt])
if len(style) > 0:
sys.stderr.write("\033[" + ";".join(style) + "m" + text + "\033[0m")
else:
sys.stderr.write(text)
class HtmlColor:
name = "html css"
COLOR_MAP = {
print_style.FC_BLACK: "color: {0}Black;",
print_style.FC_BLUE: "color: {0}Blue;",
print_style.FC_GREEN: "color: {0}Green;",
print_style.FC_CYAN: "color: {0}Cyan;",
print_style.FC_RED: "color: {0}Red;",
print_style.FC_MAGENTA: "color: {0}Magenta;",
print_style.FC_YELLOW: "color: {0}Yellow;",
print_style.FC_WHITE: "color: {0}White;",
print_style.BC_BLACK: "background-color: {0}Black;",
print_style.BC_BLUE: "background-color: {0}Blue;",
print_style.BC_GREEN: "background-color: {0}Green;",
print_style.BC_CYAN: "background-color: {0}Cyan;",
print_style.BC_RED: "background-color: {0}Red;",
print_style.BC_MAGENTA: "background-color: {0}Magenta;",
print_style.BC_YELLOW: "background-color: {0}Yellow;",
print_style.BC_WHITE: "background-color: {0}White;",
print_style.FW_BOLD: "font-weight: bold;",
}
def stdout_with_color(self, options, text):
style = []
for opt in options:
if print_style.theme:
style.append(HtmlColor.COLOR_MAP[opt].format(print_style.theme + "-"))
else:
style.append(HtmlColor.COLOR_MAP[opt].format(""))
if len(style) > 0:
sys.stdout.write(
'<span style="' + " ".join(style) + '">' + cgi.escape(text) + "</span>"
)
else:
sys.stdout.write(cgi.escape(text))
def stderr_with_color(self, options, text):
style = []
for opt in options:
if print_style.theme:
style.append(HtmlColor.COLOR_MAP[opt].format(print_style.theme + "-"))
else:
style.append(HtmlColor.COLOR_MAP[opt].format(""))
if len(style) > 0:
sys.stderr.write(
'<span style="' + " ".join(style) + '">' + cgi.escape(text) + "</span>"
)
else:
sys.stderr.write(cgi.escape(text))
class NoneColor:
name = "none"
def stdout_with_color(self, options, text):
sys.stdout.write(text)
def stderr_with_color(self, options, text):
sys.stderr.write(text)
def cprintf_set_mode(mode_name="auto"):
mode_name = mode_name.lower()
if not mode_name or mode_name == "auto":
# set by environment variable
if os.getenv("CPRINTF_MODE"):
cprintf_set_mode(os.getenv("CPRINTF_MODE"))
elif "windows" == platform.system().lower():
ostype_name = os.getenv("OSTYPE")
if ostype_name:
ostype_name = ostype_name.lower()
if "msys" == ostype_name or "cygwin" == ostype_name:
cprintf_set_mode("term")
return
term_name = os.getenv("TERM")
if term_name:
term_name = term_name.lower()
if "xterm" == term_name[0:5] or "vt" == term_name[0:2]:
cprintf_set_mode("term")
return
cprintf_set_mode("win32_console")
elif os.getenv("ANSI_COLORS_DISABLED") is None:
cprintf_set_mode("term")
else:
cprintf_set_mode("none")
elif mode_name == "none":
print_style.engine = NoneColor
elif mode_name == "term":
print_style.engine = TermColor
elif mode_name == "win32_console":
"""
See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winprog/winprog/windows_api_reference.asp
for information on Windows APIs.
"""
Win32ConsoleColor.std_out_handle = ctypes.windll.kernel32.GetStdHandle(
Win32ConsoleColor.STD_OUTPUT_HANDLE
)
Win32ConsoleColor.std_err_handle = ctypes.windll.kernel32.GetStdHandle(
Win32ConsoleColor.STD_ERROR_HANDLE
)
print_style.engine = Win32ConsoleColor
elif mode_name == "html":
print_style.engine = HtmlColor
else:
print_style.engine = NoneColor
def cprintf_set_theme(theme_name=None):
if theme_name is None:
if not os.getenv("CPRINTF_THEME") is None:
cprintf_set_theme(os.getenv("CPRINTF_THEME"))
else:
print_style.theme = theme_name
def cprintf_unpack_text(fmt, text):
if len(text) > 0:
try:
ret = fmt.format(*text)
return ret
except TypeError:
ret = fmt.decode("utf-8").encode(console_encoding).format(*text)
return ret
except EnvironmentError:
ret = fmt.decode("utf-8").encode(console_encoding).format(*text)
return ret
else:
return fmt
def cprintf_stdout(options, fmt, *text):
cp = print_style.engine()
cp.stdout_with_color(options, cprintf_unpack_text(fmt, text))
sys.stdout.flush()
def cprintf_stderr(options, fmt, *text):
cp = print_style.engine()
cp.stderr_with_color(options, cprintf_unpack_text(fmt, text))
sys.stderr.flush()
cprintf_set_mode("auto")
""" run as a executable """
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: %prog [options...] <format message> [format parameters...]"
parser = OptionParser(usage)
parser.disable_interspersed_args()
parser.add_option(
"-v",
"--version",
action="store_true",
help="show version and exit",
dest="version",
)
parser.add_option(
"-c",
"--color",
action="append",
help="set font color.(any of: black, blue, green, cyan, red, magenta, yellow, white)",
metavar="<color>",
dest="color",
)
parser.add_option(
"-b",
"--background-color",
action="append",
help="set background color.(any of: black, blue, green, cyan, red, magenta, yellow, white)",
metavar="<background color>",
dest="background_color",
)
parser.add_option(
"-B",
"--bold",
action="append_const",
help="set font weight to bold",
const=print_style.FW_BOLD,
dest="style",
)
parser.add_option(
"-m",
"--mode",
action="store",
help="set mode.(any of: auto, term, win32_console, none, html)",
metavar="<output mode>",
dest="mode",
)
parser.add_option(
"-s",
"--output-stream",
action="store",
help="set output stream.(any of: stdout, stderr)",
metavar="<ostream>",
dest="ostream",
default="stdout",
)
parser.add_option(
"-e",
action="store_true",
help="enable interpretation of backslash escapes(just like echo command in unix like system)",
dest="interp_bse",
default=False,
)
parser.add_option(
"-E",
action="store_false",
help="disable interpretation of backslash escapes(just like echo command in unix like system)",
dest="interp_bse",
)
parser.add_option(
"-t",
"--theme",
action="store",
help="set theme in html mode(light or dark)",
metavar="<theme>",
dest="theme",
default=None,
)
(options, left_args) = parser.parse_args()
print_stream = "stdout"
print_options = []
fc_list = ["FC_" + x.upper() for x in options.color or []]
bk_list = ["BC_" + y.upper() for y in options.background_color or []]
for style_list in [fc_list, bk_list]:
for style_name in style_list:
if style_name in print_style.__dict__:
print_options.append(print_style.__dict__[style_name])
for style_code in options.style or []:
print_options.append(style_code)
if options.mode:
cprintf_set_mode(options.mode)
if options.theme:
cprintf_set_theme(options.theme)
else:
cprintf_set_theme(None)
if options.version:
print(print_style.version)
print("Color Engine: " + print_style.engine.name)
exit(0)
if len(left_args) > 0:
if options.interp_bse:
for i in range(0, len(left_args)):
left_args[i] = eval(repr(left_args[i]).replace("\\\\", "\\"))
if "stdout" == options.ostream:
cprintf_stdout(print_options, *left_args)
else:
cprintf_stderr(print_options, *left_args)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import unittest
from pyowm.airpollutionapi30 import airpollution_client, airpollution_manager, coindex, so2index, ozone, no2index, airstatus
from pyowm.config import DEFAULT_CONFIG
from pyowm.constants import AIRPOLLUTION_API_VERSION
from pyowm.utils import timestamps
from tests.unit.airpollutionapi30.test_ozone import OZONE_JSON
from tests.unit.airpollutionapi30.test_coindex import COINDEX_JSON
from tests.unit.airpollutionapi30.test_no2index import NO2INDEX_JSON
from tests.unit.airpollutionapi30.test_so2index import SO2INDEX_JSON
from tests.unit.airpollutionapi30.test_airstatus import AIRSTATUS_JSON, AIRSTATUS_MULTIPLE_JSON
class TestAirPollutionManager(unittest.TestCase):
__test_instance = airpollution_manager.AirPollutionManager('fakeapikey', DEFAULT_CONFIG)
def mock_get_coi_returning_coindex_around_coords(self, params_dict):
return json.loads(COINDEX_JSON)
def mock_get_o3_returning_ozone_around_coords(self, params_dict):
return json.loads(OZONE_JSON)
def mock_get_no2_returning_no2index_around_coords(self, params_dict):
return json.loads(NO2INDEX_JSON)
def mock_get_air_pollution(self, params_dict):
return json.loads(AIRSTATUS_JSON)
def mock_get_forecast_air_pollution(self, params_dict):
return json.loads(AIRSTATUS_MULTIPLE_JSON)
def mock_get_historical_air_pollution(self, params_dict):
return json.loads(AIRSTATUS_MULTIPLE_JSON)
def mock_get_so2_returning_so2index_around_coords(self, params_dict):
return json.loads(SO2INDEX_JSON)
def test_instantiation_with_wrong_params(self):
self.assertRaises(AssertionError, airpollution_manager.AirPollutionManager, None, dict())
self.assertRaises(AssertionError, airpollution_manager.AirPollutionManager, 'apikey', None)
    def test_get_airpollution_api_version(self):
result = self.__test_instance.airpollution_api_version()
self.assertIsInstance(result, tuple)
self.assertEqual(result, AIRPOLLUTION_API_VERSION)
def test_coindex_around_coords(self):
ref_to_original = airpollution_client.AirPollutionHttpClient.get_coi
airpollution_client.AirPollutionHttpClient.get_coi = \
self.mock_get_coi_returning_coindex_around_coords
result = self.__test_instance.coindex_around_coords(45, 9, interval='day')
        airpollution_client.AirPollutionHttpClient.get_coi = ref_to_original
self.assertTrue(isinstance(result, coindex.COIndex))
        self.assertIsNotNone(result.reference_time())
self.assertIsNotNone(result.reception_time())
loc = result.location
self.assertIsNotNone(loc)
self.assertIsNotNone(loc.lat)
self.assertIsNotNone(loc.lon)
self.assertIsNotNone(result.co_samples)
ref_to_original = airpollution_client.AirPollutionHttpClient.get_coi
airpollution_client.AirPollutionHttpClient.get_coi = \
self.mock_get_coi_returning_coindex_around_coords
result = self.__test_instance.coindex_around_coords(45, 9, interval=None)
        airpollution_client.AirPollutionHttpClient.get_coi = ref_to_original
self.assertTrue(isinstance(result, coindex.COIndex))
self.assertEqual('year', result.interval)
def test_coindex_around_coords_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.coindex_around_coords, \
self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.coindex_around_coords, \
self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.coindex_around_coords, \
self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.coindex_around_coords, \
self.__test_instance, 200, 2.5)
def test_ozone_around_coords(self):
ref_to_original = airpollution_client.AirPollutionHttpClient.get_o3
airpollution_client.AirPollutionHttpClient.get_o3 = \
self.mock_get_o3_returning_ozone_around_coords
result = self.__test_instance.ozone_around_coords(45, 9, interval='day')
        airpollution_client.AirPollutionHttpClient.get_o3 = ref_to_original
self.assertTrue(isinstance(result, ozone.Ozone))
        self.assertIsNotNone(result.reference_time())
self.assertIsNotNone(result.reception_time())
loc = result.location
self.assertIsNotNone(loc)
self.assertIsNotNone(loc.lat)
self.assertIsNotNone(loc.lon)
self.assertIsNotNone(result.du_value)
ref_to_original = airpollution_client.AirPollutionHttpClient.get_o3
airpollution_client.AirPollutionHttpClient.get_o3 = \
self.mock_get_o3_returning_ozone_around_coords
result = self.__test_instance.ozone_around_coords(45, 9, interval=None)
        airpollution_client.AirPollutionHttpClient.get_o3 = ref_to_original
self.assertTrue(isinstance(result, ozone.Ozone))
self.assertEqual('year', result.interval)
def test_ozone_around_coords_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.ozone_around_coords, \
self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.ozone_around_coords, \
self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.ozone_around_coords, \
self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.ozone_around_coords, \
self.__test_instance, 200, 2.5)
def test_no2index_around_coords(self):
ref_to_original = airpollution_client.AirPollutionHttpClient.get_no2
airpollution_client.AirPollutionHttpClient.get_no2 = \
self.mock_get_no2_returning_no2index_around_coords
result = self.__test_instance.no2index_around_coords(45, 9, interval='day')
airpollution_client.AirPollutionHttpClient.get_no2 = ref_to_original
self.assertTrue(isinstance(result, no2index.NO2Index))
        self.assertIsNotNone(result.reference_time())
self.assertIsNotNone(result.reception_time())
loc = result.location
self.assertIsNotNone(loc)
self.assertIsNotNone(loc.lat)
self.assertIsNotNone(loc.lon)
self.assertIsNotNone(result.no2_samples)
ref_to_original = airpollution_client.AirPollutionHttpClient.get_no2
airpollution_client.AirPollutionHttpClient.get_no2 = \
self.mock_get_no2_returning_no2index_around_coords
result = self.__test_instance.no2index_around_coords(45, 9, interval=None)
airpollution_client.AirPollutionHttpClient.get_no2 = ref_to_original
self.assertTrue(isinstance(result, no2index.NO2Index))
self.assertEqual('year', result.interval)
def test_no2index_around_coords_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.no2index_around_coords, \
self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.no2index_around_coords, \
self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.no2index_around_coords, \
self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.no2index_around_coords, \
self.__test_instance, 200, 2.5)
def test_so2index_around_coords(self):
ref_to_original = airpollution_client.AirPollutionHttpClient.get_so2
airpollution_client.AirPollutionHttpClient.get_so2 = \
self.mock_get_so2_returning_so2index_around_coords
result = self.__test_instance.so2index_around_coords(45, 9, interval='day')
airpollution_client.AirPollutionHttpClient.get_so2 = ref_to_original
self.assertTrue(isinstance(result, so2index.SO2Index))
self.assertIsNotNone(result.reference_time())
self.assertIsNotNone(result.reception_time())
loc = result.location
self.assertIsNotNone(loc)
self.assertIsNotNone(loc.lat)
self.assertIsNotNone(loc.lon)
self.assertIsNotNone(result.so2_samples)
self.assertIsNotNone(result.interval)
ref_to_original = airpollution_client.AirPollutionHttpClient.get_so2
airpollution_client.AirPollutionHttpClient.get_so2 = \
self.mock_get_so2_returning_so2index_around_coords
result = self.__test_instance.so2index_around_coords(45, 9, interval=None)
airpollution_client.AirPollutionHttpClient.get_so2 = ref_to_original
self.assertTrue(isinstance(result, so2index.SO2Index))
self.assertEqual('year', result.interval)
def test_so2index_around_coords_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.so2index_around_coords, \
self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.so2index_around_coords, \
self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.so2index_around_coords, \
self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.so2index_around_coords, \
self.__test_instance, 200, 2.5)
def test_air_quality_at_coords(self):
ref_to_original = airpollution_client.AirPollutionHttpClient.get_air_pollution
airpollution_client.AirPollutionHttpClient.get_air_pollution = \
self.mock_get_air_pollution
result = self.__test_instance.air_quality_at_coords(45, 9)
airpollution_client.AirPollutionHttpClient.get_air_pollution = ref_to_original
self.assertTrue(isinstance(result, airstatus.AirStatus))
self.assertIsNotNone(result.reference_time)
self.assertIsNotNone(result.reception_time())
loc = result.location
self.assertIsNotNone(loc)
self.assertIsNotNone(loc.lat)
self.assertIsNotNone(loc.lon)
self.assertIsNotNone(result.air_quality_data)
def test_air_quality_at_coords_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_at_coords, \
self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_at_coords, \
self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_at_coords, \
self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_at_coords, \
self.__test_instance, 200, 2.5)
def test_air_quality_forecast_at_coords(self):
ref_to_original = airpollution_client.AirPollutionHttpClient.get_forecast_air_pollution
airpollution_client.AirPollutionHttpClient.get_forecast_air_pollution = \
self.mock_get_forecast_air_pollution
result = self.__test_instance.air_quality_forecast_at_coords(45, 9)
airpollution_client.AirPollutionHttpClient.get_forecast_air_pollution = ref_to_original
self.assertTrue(isinstance(result, list))
for item in result:
self.assertIsInstance(item, airstatus.AirStatus)
self.assertIsNotNone(item.reference_time)
self.assertIsNotNone(item.reception_time())
loc = item.location
self.assertIsNotNone(loc)
self.assertIsNotNone(loc.lat)
self.assertIsNotNone(loc.lon)
self.assertIsNotNone(item.air_quality_data)
def test_air_quality_forecast_at_coords_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_forecast_at_coords, \
self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_forecast_at_coords, \
self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_forecast_at_coords, \
self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_forecast_at_coords, \
self.__test_instance, 200, 2.5)
def test_air_quality_history_at_coords(self):
ref_to_original = airpollution_client.AirPollutionHttpClient.get_historical_air_pollution
airpollution_client.AirPollutionHttpClient.get_historical_air_pollution = \
self.mock_get_historical_air_pollution
result = self.__test_instance.air_quality_history_at_coords(45, 9, 12345678)
airpollution_client.AirPollutionHttpClient.get_historical_air_pollution = ref_to_original
self.assertTrue(isinstance(result, list))
for item in result:
self.assertIsInstance(item, airstatus.AirStatus)
self.assertIsNotNone(item.reference_time)
self.assertIsNotNone(item.reception_time())
loc = item.location
self.assertIsNotNone(loc)
self.assertIsNotNone(loc.lat)
self.assertIsNotNone(loc.lon)
self.assertIsNotNone(item.air_quality_data)
def test_air_quality_history_at_coords_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_history_at_coords, \
self.__test_instance, 43.7, -200.0, 12345678, 12349999)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_history_at_coords, \
self.__test_instance, 43.7, 200.0, 12345678, 12349999)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_history_at_coords, \
self.__test_instance, -200, 2.5, 12345678, 12349999)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_history_at_coords, \
self.__test_instance, 200, 2.5, 12345678, 12349999)
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_history_at_coords, \
self.__test_instance, 200, 2.5, 'test')
self.assertRaises(ValueError, airpollution_manager.AirPollutionManager.air_quality_history_at_coords, \
self.__test_instance, 200, 2.5, 'test', 'test2')
def test_air_quality_history_at_coords_clips_end_param_to_current_timestamp(self):
now = timestamps.now(timeformat='unix')
end = now + 99999999999
def assert_clipped(obj, params_dict):
self.assertEqual(params_dict['end'], now)
airpollution_client.AirPollutionHttpClient.get_historical_air_pollution = assert_clipped
_ = self.__test_instance.air_quality_history_at_coords(45, 9, 12345678, end=end)
def test_repr(self):
print(self.__test_instance)
|
|
# -*- coding: utf-8 -*-
"""
theflasktest.testsuite.testing
~~~~~~~~~~~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
from flask._compat import text_type
class TestToolsTestCase(FlaskTestCase):
def test_environ_defaults_from_config(self):
app = flask.Flask(__name__)
app.testing = True
app.config['SERVER_NAME'] = 'example.com:1234'
app.config['APPLICATION_ROOT'] = '/foo'
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://example.com:1234/foo/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://example.com:1234/foo/')
def test_environ_defaults(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://localhost/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://localhost/')
def test_redirect_keep_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testing'
@app.route('/', methods=['GET', 'POST'])
def index():
if flask.request.method == 'POST':
return flask.redirect('/getsession')
flask.session['data'] = 'foo'
return 'index'
@app.route('/getsession')
def get_session():
return flask.session.get('data', '<missing>')
with app.test_client() as c:
rv = c.get('/getsession')
assert rv.data == b'<missing>'
rv = c.get('/')
assert rv.data == b'index'
assert flask.session.get('data') == 'foo'
rv = c.post('/', data={}, follow_redirects=True)
assert rv.data == b'foo'
# This support requires a new Werkzeug version
if not hasattr(c, 'redirect_client'):
assert flask.session.get('data') == 'foo'
rv = c.get('/getsession')
assert rv.data == b'foo'
def test_session_transactions(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
@app.route('/')
def index():
return text_type(flask.session['foo'])
with app.test_client() as c:
with c.session_transaction() as sess:
self.assert_equal(len(sess), 0)
sess['foo'] = [42]
self.assert_equal(len(sess), 1)
rv = c.get('/')
self.assert_equal(rv.data, b'[42]')
with c.session_transaction() as sess:
self.assert_equal(len(sess), 1)
self.assert_equal(sess['foo'], [42])
def test_session_transactions_no_null_sessions(self):
app = flask.Flask(__name__)
app.testing = True
with app.test_client() as c:
try:
with c.session_transaction() as sess:
pass
except RuntimeError as e:
self.assert_in('Session backend did not open a session', str(e))
else:
self.fail('Expected runtime error')
def test_session_transactions_keep_context(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
with app.test_client() as c:
rv = c.get('/')
req = flask.request._get_current_object()
self.assert_true(req is not None)
with c.session_transaction():
self.assert_true(req is flask.request._get_current_object())
def test_session_transaction_needs_cookies(self):
app = flask.Flask(__name__)
app.testing = True
c = app.test_client(use_cookies=False)
try:
with c.session_transaction() as s:
pass
except RuntimeError as e:
self.assert_in('cookies', str(e))
else:
self.fail('Expected runtime error')
def test_test_client_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
flask.g.value = 42
return 'Hello World!'
@app.route('/other')
def other():
1 // 0
with app.test_client() as c:
resp = c.get('/')
self.assert_equal(flask.g.value, 42)
self.assert_equal(resp.data, b'Hello World!')
self.assert_equal(resp.status_code, 200)
resp = c.get('/other')
self.assert_false(hasattr(flask.g, 'value'))
self.assert_in(b'Internal Server Error', resp.data)
self.assert_equal(resp.status_code, 500)
flask.g.value = 23
try:
flask.g.value
except (AttributeError, RuntimeError):
pass
else:
raise AssertionError('some kind of exception expected')
def test_reuse_client(self):
app = flask.Flask(__name__)
c = app.test_client()
with c:
self.assert_equal(c.get('/').status_code, 404)
with c:
self.assert_equal(c.get('/').status_code, 404)
def test_test_client_calls_teardown_handlers(self):
app = flask.Flask(__name__)
called = []
@app.teardown_request
def remember(error):
called.append(error)
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
self.assert_equal(called, [None])
del called[:]
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [None])
self.assert_equal(called, [None, None])
class SubdomainTestCase(FlaskTestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SERVER_NAME'] = 'example.com'
self.client = self.app.test_client()
self._ctx = self.app.test_request_context()
self._ctx.push()
def tearDown(self):
if self._ctx is not None:
self._ctx.pop()
def test_subdomain(self):
@self.app.route('/', subdomain='<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def test_nosubdomain(self):
@self.app.route('/<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestToolsTestCase))
suite.addTest(unittest.makeSuite(SubdomainTestCase))
return suite
|
|
"""
This contains evaluation functions for expressions
They get bound as instances-methods to the CompValue objects from parserutils
using setEvalFn
"""
import sys
import re
import math
import random
import uuid
import hashlib
import urllib2
from decimal import Decimal, ROUND_HALF_UP, InvalidOperation
import operator as pyop # python operators
import isodate
from rdflib.plugins.sparql.parserutils import CompValue, Expr
from rdflib.plugins.sparql.datatypes import XSD_DTs, type_promotion
from rdflib import URIRef, BNode, Variable, Literal, XSD, RDF
from rdflib.term import Node
from pyparsing import ParseResults
from rdflib.plugins.sparql.sparql import SPARQLError, SPARQLTypeError
# closed namespace, langString isn't in it
RDF_langString = URIRef(RDF.uri + "langString")
def Builtin_IRI(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-iri
"""
a = expr.arg
if isinstance(a, URIRef):
return a
if isinstance(a, Literal):
return ctx.prologue.absolutize(URIRef(a))
raise SPARQLError('IRI function only accepts URIRefs or Literals/Strings!')
def Builtin_isBLANK(expr, ctx):
return Literal(isinstance(expr.arg, BNode))
def Builtin_isLITERAL(expr, ctx):
return Literal(isinstance(expr.arg, Literal))
def Builtin_isIRI(expr, ctx):
return Literal(isinstance(expr.arg, URIRef))
def Builtin_isNUMERIC(expr, ctx):
try:
numeric(expr.arg)
return Literal(True)
except:
return Literal(False)
def Builtin_BNODE(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-bnode
"""
a = expr.arg
if a is None:
return BNode()
if isinstance(a, Literal):
return ctx.bnodes[a] # defaultdict does the right thing
raise SPARQLError(
'BNode function only accepts no argument or literal/string')
def Builtin_ABS(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-abs
"""
return Literal(abs(numeric(expr.arg)))
def Builtin_IF(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-if
"""
return expr.arg2 if EBV(expr.arg1) else expr.arg3
def Builtin_RAND(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#idp2133952
"""
return Literal(random.random())
def Builtin_UUID(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-uuid
"""
return URIRef(uuid.uuid4().urn)
def Builtin_STRUUID(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-struuid
"""
return Literal(str(uuid.uuid4()))
def Builtin_MD5(expr, ctx):
s = string(expr.arg).encode("utf-8")
return Literal(hashlib.md5(s).hexdigest())
def Builtin_SHA1(expr, ctx):
s = string(expr.arg).encode("utf-8")
return Literal(hashlib.sha1(s).hexdigest())
def Builtin_SHA256(expr, ctx):
s = string(expr.arg).encode("utf-8")
return Literal(hashlib.sha256(s).hexdigest())
def Builtin_SHA384(expr, ctx):
s = string(expr.arg).encode("utf-8")
return Literal(hashlib.sha384(s).hexdigest())
def Builtin_SHA512(expr, ctx):
s = string(expr.arg).encode("utf-8")
return Literal(hashlib.sha512(s).hexdigest())
def Builtin_COALESCE(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-coalesce
"""
for x in expr.get('arg', variables=True):
if x is not None and not isinstance(x, (SPARQLError, Variable)):
return x
raise SPARQLError(
"COALESCE got no arguments that did not evaluate to an error")
def Builtin_CEIL(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-ceil
"""
l = expr.arg
return Literal(int(math.ceil(numeric(l))), datatype=l.datatype)
def Builtin_FLOOR(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-floor
"""
l = expr.arg
return Literal(int(math.floor(numeric(l))), datatype=l.datatype)
def Builtin_ROUND(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-round
"""
# This used to be just the built-in round(),
# but in py3k round() was changed to
# "round-to-even" behaviour;
# this is an ugly work-around
l = expr.arg
v = numeric(l)
v = int(Decimal(v).quantize(1, ROUND_HALF_UP))
return Literal(v, datatype=l.datatype)
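# Added note (not original code): the Decimal work-around above is needed
# because Python 3's built-in round() uses round-half-to-even. A minimal
# sketch with assumed values:
#   round(2.5)                                    # -> 2 (round-half-to-even)
#   int(Decimal(2.5).quantize(1, ROUND_HALF_UP))  # -> 3 (half-up, as used here)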
def Builtin_REGEX(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-regex
Invokes the XPath fn:matches function to match text against a regular
expression pattern.
The regular expression language is defined in XQuery 1.0 and XPath 2.0
Functions and Operators section 7.6.1 Regular Expression Syntax
"""
text = string(expr.text)
pattern = string(expr.pattern)
flags = expr.flags
cFlag = 0
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
flagMap = dict(
[('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
return Literal(bool(re.search(unicode(pattern), text, cFlag)))
def Builtin_REPLACE(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-replace
"""
text = string(expr.arg)
pattern = string(expr.pattern)
replacement = string(expr.replacement)
flags = expr.flags
# python uses \1, xpath/sparql uses $1
replacement = re.sub('\\$([0-9]*)', r'\\\1', replacement)
def _r(m):
# Now this is ugly.
# Python has a "feature" where unmatched groups return None
# then re.sub chokes on this.
# see http://bugs.python.org/issue1519638 (fixed in py3.5, where this work-around itself errors)
# this works around and hooks into the internal of the re module...
# the match object is replaced with a wrapper that
# returns "" instead of None for unmatched groups
class _m():
def __init__(self, m):
self.m = m
self.string = m.string
def group(self, n):
return m.group(n) or ""
return re._expand(pattern, _m(m), replacement)
cFlag = 0
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
flagMap = dict(
[('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
# @@FIXME@@ either datatype OR lang, NOT both
# this is necessary due to different treatment of unmatched groups in
# python versions. see comments above in _r(m).
compat_r = unicode(replacement) if sys.version_info[:2] >= (3, 5) else _r
return Literal(re.sub(unicode(pattern), compat_r, text, flags=cFlag),
datatype=text.datatype, lang=text.language)
def Builtin_STRDT(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-strdt
"""
return Literal(unicode(expr.arg1), datatype=expr.arg2)
def Builtin_STRLANG(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-strlang
"""
s = string(expr.arg1)
if s.language or s.datatype:
raise SPARQLError('STRLANG expects a simple literal')
# TODO: normalisation of lang tag to lower-case
# should probably happen in literal __init__
return Literal(unicode(s), lang=str(expr.arg2).lower())
def Builtin_CONCAT(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-concat
"""
# dt/lang passed on only if they all match
dt = set(x.datatype for x in expr.arg)
dt = dt.pop() if len(dt) == 1 else None
lang = set(x.language for x in expr.arg)
lang = lang.pop() if len(lang) == 1 else None
return Literal("".join(string(x)
for x in expr.arg), datatype=dt, lang=lang)
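# Added note (not original code): the datatype/language tag survives only when
# all arguments agree, per the set logic above. Illustrative sketch:
#   CONCAT("foo"@en, "bar"@en)  -> "foobar"@en
#   CONCAT("foo"@en, "bar")     -> "foobar"      (tags differ, plain literal)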
def _compatibleStrings(a, b):
string(a)
string(b)
if b.language and a.language != b.language:
raise SPARQLError('incompatible arguments to str functions')
def Builtin_STRSTARTS(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-strstarts
"""
a = expr.arg1
b = expr.arg2
_compatibleStrings(a, b)
return Literal(a.startswith(b))
def Builtin_STRENDS(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-strends
"""
a = expr.arg1
b = expr.arg2
_compatibleStrings(a, b)
return Literal(a.endswith(b))
def Builtin_STRBEFORE(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-strbefore
"""
a = expr.arg1
b = expr.arg2
_compatibleStrings(a, b)
i = a.find(b)
if i == -1:
return Literal("")
else:
return Literal(a[:i], lang=a.language, datatype=a.datatype)
def Builtin_STRAFTER(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-strafter
"""
a = expr.arg1
b = expr.arg2
_compatibleStrings(a, b)
i = a.find(b)
if i == -1:
return Literal("")
else:
return Literal(a[i + len(b):], lang=a.language, datatype=a.datatype)
def Builtin_CONTAINS(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-strcontains
"""
a = expr.arg1
b = expr.arg2
_compatibleStrings(a, b)
return Literal(b in a)
def Builtin_ENCODE_FOR_URI(expr, ctx):
return Literal(urllib2.quote(string(expr.arg).encode("utf-8")))
def Builtin_SUBSTR(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-substr
"""
a = string(expr.arg)
start = numeric(expr.start) - 1
length = expr.length
if length is not None:
length = numeric(length) + start
return Literal(a[start:length], lang=a.language, datatype=a.datatype)
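# Added note (not original code): SPARQL SUBSTR is 1-based, hence the "- 1"
# above, and the optional length is turned into a slice end. Illustrative
# sketch:
#   SUBSTR("foobar", 4)     -> "bar"
#   SUBSTR("foobar", 4, 1)  -> "b"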
def Builtin_STRLEN(e, ctx):
l = string(e.arg)
return Literal(len(l))
def Builtin_STR(e, ctx):
arg = e.arg
if isinstance(arg, SPARQLError):
raise arg
return Literal(unicode(arg)) # plain literal
def Builtin_LCASE(e, ctx):
l = string(e.arg)
return Literal(l.lower(), datatype=l.datatype, lang=l.language)
def Builtin_LANGMATCHES(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-langMatches
"""
langTag = string(e.arg1)
langRange = string(e.arg2)
if unicode(langTag) == "":
return Literal(False) # nothing matches empty!
return Literal(_lang_range_check(langRange, langTag))
def Builtin_NOW(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-now
"""
return Literal(ctx.now)
def Builtin_YEAR(e, ctx):
d = datetime(e.arg)
return Literal(d.year)
def Builtin_MONTH(e, ctx):
d = datetime(e.arg)
return Literal(d.month)
def Builtin_DAY(e, ctx):
d = datetime(e.arg)
return Literal(d.day)
def Builtin_HOURS(e, ctx):
d = datetime(e.arg)
return Literal(d.hour)
def Builtin_MINUTES(e, ctx):
d = datetime(e.arg)
return Literal(d.minute)
def Builtin_SECONDS(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-seconds
"""
d = datetime(e.arg)
return Literal(d.second, datatype=XSD.decimal)
def Builtin_TIMEZONE(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-timezone
:returns: the timezone part of arg as an xsd:dayTimeDuration.
:raises: an error if there is no timezone.
"""
dt = datetime(e.arg)
if not dt.tzinfo:
raise SPARQLError('datetime has no timezone: %s' % dt)
delta = dt.tzinfo.utcoffset(ctx.now)
d = delta.days
s = delta.seconds
neg = ""
if d < 0:
s = -24 * 60 * 60 * d - s
d = 0
neg = "-"
h = s / (60 * 60)
m = (s - h * 60 * 60) / 60
s = s - h * 60 * 60 - m * 60
tzdelta = "%sP%sT%s%s%s" % (neg,
"%dD" % d if d else "",
"%dH" % h if h else "",
"%dM" % m if m else "",
"%dS" % s if not d and not h and not m else "")
return Literal(tzdelta, datatype=XSD.dayTimeDuration)
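# Added note (not original code): the UTC offset is rendered as an
# xsd:dayTimeDuration, e.g. +01:00 -> "PT1H", -05:00 -> "-PT5H", and a zero
# offset -> "PT0S".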
def Builtin_TZ(e, ctx):
d = datetime(e.arg)
if not d.tzinfo:
return Literal("")
n = d.tzinfo.tzname(d)
if n == "UTC":
n = "Z"
return Literal(n)
def Builtin_UCASE(e, ctx):
l = string(e.arg)
return Literal(l.upper(), datatype=l.datatype, lang=l.language)
def Builtin_LANG(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-lang
Returns the language tag of ltrl, if it has one. It returns "" if ltrl has
no language tag. Note that the RDF data model does not include literals
with an empty language tag.
"""
l = literal(e.arg)
return Literal(l.language or "")
def Builtin_DATATYPE(e, ctx):
l = e.arg
if not isinstance(l, Literal):
raise SPARQLError('Can only get datatype of literal: %s' % l)
if l.language:
return RDF_langString
if not l.datatype and not l.language:
return XSD.string
return l.datatype
def Builtin_sameTerm(e, ctx):
a = e.arg1
b = e.arg2
return Literal(a == b)
def Builtin_BOUND(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-bound
"""
n = e.get('arg', variables=True)
return Literal(not isinstance(n, Variable))
def Builtin_EXISTS(e, ctx):
# damn...
from rdflib.plugins.sparql.evaluate import evalPart
exists = e.name == 'Builtin_EXISTS'
ctx = ctx.ctx.thaw(ctx) # hmm
for x in evalPart(ctx, e.graph):
return Literal(exists)
return Literal(not exists)
def Function(e, ctx):
"""
Custom functions (and casts!)
"""
if e.iri in XSD_DTs:
# a cast
if not e.expr:
raise SPARQLError("Nothing given to cast.")
if len(e.expr) > 1:
raise SPARQLError("Cannot cast more than one thing!")
x = e.expr[0]
if e.iri == XSD.string:
if isinstance(x, (URIRef, Literal)):
return Literal(x, datatype=XSD.string)
else:
raise SPARQLError(
"Cannot cast term %s of type %s" % (x, type(x)))
if not isinstance(x, Literal):
raise SPARQLError(
"Can only cast Literals to non-string data-types")
if x.datatype and not x.datatype in XSD_DTs:
raise SPARQLError(
"Cannot cast literal with unknown datatype: %s" % x.datatype)
if e.iri == XSD.dateTime:
if x.datatype and x.datatype not in (XSD.dateTime, XSD.string):
raise SPARQLError(
"Cannot cast %s to XSD:dateTime" % x.datatype)
try:
return Literal(isodate.parse_datetime(x), datatype=e.iri)
except:
raise SPARQLError("Cannot interpret '%s' as datetime" % x)
if x.datatype == XSD.dateTime:
raise SPARQLError("Cannot cast XSD.dateTime to %s" % e.iri)
if e.iri in (XSD.float, XSD.double):
try:
return Literal(float(x), datatype=e.iri)
except:
raise SPARQLError("Cannot interpret '%s' as float" % x)
elif e.iri == XSD.decimal:
if "e" in x or "E" in x: # SPARQL/XSD does not allow exponents in decimals
raise SPARQLError("Cannot interpret '%s' as decimal" % x)
try:
return Literal(Decimal(x), datatype=e.iri)
except:
raise SPARQLError("Cannot interpret '%s' as decimal" % x)
elif e.iri == XSD.integer:
try:
return Literal(int(x), datatype=XSD.integer)
except:
raise SPARQLError("Cannot interpret '%s' as int" % x)
elif e.iri == XSD.boolean:
# # I would argue that any number is True...
# try:
# return Literal(bool(int(x)), datatype=XSD.boolean)
# except:
if x.lower() in ("1", "true"):
return Literal(True)
if x.lower() in ("0", "false"):
return Literal(False)
raise SPARQLError("Cannot interpret '%s' as bool" % x)
else:
raise Exception("I do not know how to cast to %s" % e.iri)
else:
raise SPARQLError('Unknown function %s' % e.iri)
# TODO: Custom functions!
def UnaryNot(expr, ctx):
return Literal(not EBV(expr.expr))
def UnaryMinus(expr, ctx):
return Literal(-numeric(expr.expr))
def UnaryPlus(expr, ctx):
return Literal(+numeric(expr.expr))
def MultiplicativeExpression(e, ctx):
expr = e.expr
other = e.other
# because of the way the mul-expr production handled operator precedence
# we sometimes have nothing to do
if other is None:
return expr
try:
res = Decimal(numeric(expr))
for op, f in zip(e.op, other):
f = numeric(f)
if type(f) == float:
res = float(res)
if op == '*':
res *= f
else:
res /= f
except (InvalidOperation, ZeroDivisionError):
raise SPARQLError('divide by 0')
return Literal(res)
def AdditiveExpression(e, ctx):
expr = e.expr
other = e.other
# because of the way the add-expr production handled operator precedence
# we sometimes have nothing to do
if other is None:
return expr
res = numeric(expr)
dt = expr.datatype
for op, term in zip(e.op, other):
n = numeric(term)
if isinstance(n, Decimal) and isinstance(res, float):
n = float(n)
if isinstance(n, float) and isinstance(res, Decimal):
res = float(res)
dt = type_promotion(dt, term.datatype)
if op == '+':
res += n
else:
res -= n
return Literal(res, datatype=dt)
def RelationalExpression(e, ctx):
expr = e.expr
other = e.other
op = e.op
# because of the way the add-expr production handled operator precedence
# we sometimes have nothing to do
if other is None:
return expr
ops = dict([('>', lambda x, y: x.__gt__(y)),
('<', lambda x, y: x.__lt__(y)),
('=', lambda x, y: x.eq(y)),
('!=', lambda x, y: x.neq(y)),
('>=', lambda x, y: x.__ge__(y)),
('<=', lambda x, y: x.__le__(y)),
('IN', pyop.contains),
('NOT IN', lambda x, y: not pyop.contains(x, y))])
if op in ('IN', 'NOT IN'):
res = (op == 'NOT IN')
error = False
if other == RDF.nil:
other = []
for x in other:
try:
if x == expr:
return Literal(True ^ res)
except SPARQLError, e:
error = e
if not error:
return Literal(False ^ res)
else:
raise error
if not op in ('=', '!=', 'IN', 'NOT IN'):
if not isinstance(expr, Literal):
raise SPARQLError(
"Compare other than =, != of non-literals is an error: %s" %
expr)
if not isinstance(other, Literal):
raise SPARQLError(
"Compare other than =, != of non-literals is an error: %s" %
other)
else:
if not isinstance(expr, Node):
raise SPARQLError('I cannot compare this non-node: %s' % expr)
if not isinstance(other, Node):
raise SPARQLError('I cannot compare this non-node: %s' % other)
if isinstance(expr, Literal) and isinstance(other, Literal):
if expr.datatype != None and expr.datatype not in XSD_DTs and other.datatype != None and other.datatype not in XSD_DTs:
# in SPARQL for non-XSD DT Literals we can only do =,!=
if op not in ('=', '!='):
raise SPARQLError(
'Can only do =,!= comparisons of non-XSD Literals')
try:
r = ops[op](expr, other)
if r == NotImplemented:
raise SPARQLError('Error when comparing')
except TypeError, te:
raise SPARQLError(*te.args)
return Literal(r)
def ConditionalAndExpression(e, ctx):
# TODO: handle returned errors
expr = e.expr
other = e.other
# because of the way the add-expr production handled operator precedence
# we sometimes have nothing to do
if other is None:
return expr
return Literal(all(EBV(x) for x in [expr] + other))
def ConditionalOrExpression(e, ctx):
# TODO: handle errors
expr = e.expr
other = e.other
# because of the way the add-expr production handled operator precedence
# we sometimes have nothing to do
if other is None:
return expr
# A logical-or that encounters an error on only one branch
# will return TRUE if the other branch is TRUE and an error
# if the other branch is FALSE.
error = None
for x in [expr] + other:
try:
if EBV(x):
return Literal(True)
except SPARQLError, e:
error = e
if error:
raise error
return Literal(False)
def not_(arg):
return Expr('UnaryNot', UnaryNot, expr=arg)
def and_(*args):
if len(args) == 1:
return args[0]
return Expr('ConditionalAndExpression', ConditionalAndExpression,
expr=args[0], other=list(args[1:]))
TrueFilter = Expr('TrueFilter', lambda _1, _2: Literal(True))
def simplify(expr):
if isinstance(expr, ParseResults) and len(expr) == 1:
return simplify(expr[0])
if isinstance(expr, (list, ParseResults)):
return map(simplify, expr)
if not isinstance(expr, CompValue):
return expr
if expr.name.endswith('Expression'):
if expr.other is None:
return simplify(expr.expr)
for k in expr.keys():
expr[k] = simplify(expr[k])
# expr['expr']=simplify(expr.expr)
# expr['other']=simplify(expr.other)
return expr
def literal(s):
if not isinstance(s, Literal):
raise SPARQLError("Non-literal passed as string: %s" % s)
return s
def datetime(e):
if not isinstance(e, Literal):
raise SPARQLError("Non-literal passed as datetime: %s" % e)
if not e.datatype == XSD.dateTime:
raise SPARQLError(
"Literal with wrong datatype passed as datetime: %s" % e)
return e.toPython()
def string(s):
"""
Make sure the passed thing is a string literal
i.e. plain literal, xsd:string literal or lang-tagged literal
"""
if not isinstance(s, Literal):
raise SPARQLError("Non-literal passes as string: %s" % s)
if s.datatype and s.datatype != XSD.string:
raise SPARQLError(
"Non-string datatype-literal passes as string: %s" % s)
return s
def numeric(expr):
"""
return a number from a literal
http://www.w3.org/TR/xpath20/#promotion
or TypeError
"""
if not isinstance(expr, Literal):
raise SPARQLTypeError("%s is not a literal!" % expr)
if expr.datatype not in (XSD.float, XSD.double,
XSD.decimal, XSD.integer,
XSD.nonPositiveInteger, XSD.negativeInteger,
XSD.nonNegativeInteger, XSD.positiveInteger,
XSD.unsignedLong, XSD.unsignedInt,
XSD.unsignedShort, XSD.unsignedByte,
XSD.long, XSD.int, XSD.short, XSD.byte):
raise SPARQLTypeError("%s does not have a numeric datatype!" % expr)
return expr.toPython()
def EBV(rt):
"""
* If the argument is a typed literal with a datatype of xsd:boolean,
the EBV is the value of that argument.
* If the argument is a plain literal or a typed literal with a
datatype of xsd:string, the EBV is false if the operand value
has zero length; otherwise the EBV is true.
* If the argument is a numeric type or a typed literal with a datatype
derived from a numeric type, the EBV is false if the operand value is
NaN or is numerically equal to zero; otherwise the EBV is true.
* All other arguments, including unbound arguments, produce a type error.
"""
if isinstance(rt, Literal):
if rt.datatype == XSD.boolean:
return rt.toPython()
elif rt.datatype == XSD.string or rt.datatype is None:
return len(rt) > 0
else:
pyRT = rt.toPython()
if isinstance(pyRT, Literal):
# Type error, see: http://www.w3.org/TR/rdf-sparql-query/#ebv
raise SPARQLTypeError(
"http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
'Could not determine the EBV for : %s" % rt)
else:
return bool(pyRT)
else:
raise SPARQLTypeError(
"http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
'Only literals have Boolean values! %s" % rt)
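# Added note (not original code): illustrative sketch of the rules above:
#   EBV(Literal(True))   -> True    (xsd:boolean value is used directly)
#   EBV(Literal(""))     -> False   (zero-length plain/xsd:string literal)
#   EBV(Literal(0))      -> False   (numeric zero)
#   EBV(Literal("abc"))  -> True
#   EBV(BNode())         -> raises SPARQLTypeError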
def _lang_range_check(range, lang):
"""
Implementation of the extended filtering algorithm, as defined in point
3.3.2, of U{RFC 4647<http://www.rfc-editor.org/rfc/rfc4647.txt>}, on
matching language ranges and language tags.
Needed to handle the C{rdf:PlainLiteral} datatype.
@param range: language range
@param lang: language tag
@rtype: boolean
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
Taken from `RDFClosure/RestrictedDatatype.py`__
.. __:http://dev.w3.org/2004/PythonLib-IH/RDFClosure/RestrictedDatatype.py
"""
def _match(r, l):
"""
Matching of a range and language item: either range is a wildcard
or the two are equal
@param r: language range item
@param l: language tag item
@rtype: boolean
"""
return r == '*' or r == l
rangeList = range.strip().lower().split('-')
langList = lang.strip().lower().split('-')
if not _match(rangeList[0], langList[0]):
return False
if len(rangeList) > len(langList):
return False
return all(_match(*x) for x in zip(rangeList, langList))
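# Added note (not original code): illustrative sketch of the subtag-by-subtag
# comparison above ("*" matches any single subtag):
#   _lang_range_check("de-*-DE", "de-Latn-DE")  -> True
#   _lang_range_check("fr", "de-DE")            -> False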
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Wishart distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"WishartCholesky",
"WishartFull",
]
class _WishartLinearOperator(distribution.Distribution):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar number of degrees of freedom `df` and
an instance of `LinearOperator`, which provides matrix-free access to a
symmetric positive definite operator, which defines the scale matrix.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
See `WishartFull`, `WishartCholesky` for examples of initializing and using
this class.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale_operator,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct Wishart distributions.
Args:
df: `float` or `double` tensor, the degrees of freedom of the
distribution(s). `df` must be greater than or equal to `k`.
scale_operator: `float` or `double` instance of `LinearOperator`.
cholesky_input_output_matrices: Python `bool`. When `True`, any function
whose input or output is a matrix assumes the input is a Cholesky factor
and returns a Cholesky-factored matrix. For example, `log_prob` takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if scale is not floating-type
TypeError: if scale.dtype != df.dtype
ValueError: if df < k, where scale operator event shape is
`(k, k)`
"""
parameters = dict(locals())
self._cholesky_input_output_matrices = cholesky_input_output_matrices
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[df, scale_operator]):
if not scale_operator.dtype.is_floating:
raise TypeError(
"scale_operator.dtype=%s is not a floating-point type" %
scale_operator.dtype)
if not scale_operator.is_square:
raise ValueError("scale_operator must be square.")
self._scale_operator = scale_operator
self._df = ops.convert_to_tensor(
df,
dtype=scale_operator.dtype,
name="df")
contrib_tensor_util.assert_same_float_dtype(
(self._df, self._scale_operator))
if (self._scale_operator.shape.ndims is None or
self._scale_operator.shape.dims[-1].value is None):
self._dimension = math_ops.cast(
self._scale_operator.domain_dimension_tensor(),
dtype=self._scale_operator.dtype, name="dimension")
else:
self._dimension = ops.convert_to_tensor(
self._scale_operator.shape.dims[-1].value,
dtype=self._scale_operator.dtype, name="dimension")
df_val = tensor_util.constant_value(self._df)
dim_val = tensor_util.constant_value(self._dimension)
if df_val is not None and dim_val is not None:
df_val = np.asarray(df_val)
if not df_val.shape:
df_val = [df_val]
if any(df_val < dim_val):
raise ValueError(
"Degrees of freedom (df = %s) cannot be less than "
"dimension of scale matrix (scale.dimension = %s)"
% (df_val, dim_val))
elif validate_args:
assertions = check_ops.assert_less_equal(
self._dimension, self._df,
message=("Degrees of freedom (df = %s) cannot be "
"less than dimension of scale matrix "
"(scale.dimension = %s)" %
(self._dimension, self._df)))
self._df = control_flow_ops.with_dependencies(
[assertions], self._df)
super(_WishartLinearOperator, self).__init__(
dtype=self._scale_operator.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=([self._df, self._dimension] +
self._scale_operator.graph_parents),
name=name)
@property
def df(self):
"""Wishart distribution degree(s) of freedom."""
return self._df
def _square_scale_operator(self):
return self.scale_operator.matmul(
self.scale_operator.to_dense(), adjoint_arg=True)
def scale(self):
"""Wishart distribution scale matrix."""
if self._cholesky_input_output_matrices:
return self.scale_operator.to_dense()
else:
return self._square_scale_operator()
@property
def scale_operator(self):
"""Wishart distribution scale matrix as an Linear Operator."""
return self._scale_operator
@property
def cholesky_input_output_matrices(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices
@property
def dimension(self):
"""Dimension of underlying vector space. The `p` in `R^(p*p)`."""
return self._dimension
def _event_shape_tensor(self):
dimension = self.scale_operator.domain_dimension_tensor()
return array_ops.stack([dimension, dimension])
def _event_shape(self):
dimension = self.scale_operator.domain_dimension
return tensor_shape.TensorShape([dimension, dimension])
def _batch_shape_tensor(self):
return self.scale_operator.batch_shape_tensor()
def _batch_shape(self):
return self.scale_operator.batch_shape
def _sample_n(self, n, seed):
batch_shape = self.batch_shape_tensor()
event_shape = self.event_shape_tensor()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat([[n], batch_shape, event_shape], 0)
# Complexity: O(nbk**2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
expanded_df = self.df * array_ops.ones(
self.scale_operator.batch_shape_tensor(),
dtype=self.df.dtype.base_dtype)
g = random_ops.random_gamma(shape=[n],
alpha=self._multi_gamma_sequence(
0.5 * expanded_df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(
seed, "wishart"))
# Complexity: O(nbk**2)
x = array_ops.matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk**2)
perm = array_ops.concat([math_ops.range(1, ndims), [0]], 0)
x = array_ops.transpose(x, perm)
shape = array_ops.concat([batch_shape, [event_shape[0]], [-1]], 0)
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for LinearOperatorDiag, each matmul is O(k**2), so
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each matmul is O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator.matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk**2)
shape = array_ops.concat([batch_shape, event_shape, [n]], 0)
x = array_ops.reshape(x, shape)
perm = array_ops.concat([[ndims - 1], math_ops.range(0, ndims - 1)], 0)
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.matmul(x, x, adjoint_b=True)
return x
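# Added note (not original code): the construction in _sample_n above is the
# Bartlett decomposition -- a lower-triangular factor A with standard normals
# below the diagonal and square roots of gamma (chi-square) variates on the
# diagonal, pre-multiplied by the scale factor S -- so that X = (S A)(S A)'
# is Wishart(df, scale) distributed when cholesky_input_output_matrices=False.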
def _log_prob(self, x):
if self.cholesky_input_output_matrices:
x_sqrt = x
else:
# Complexity: O(nbk^3)
x_sqrt = linalg_ops.cholesky(x)
batch_shape = self.batch_shape_tensor()
event_shape = self.event_shape_tensor()
ndims = array_ops.rank(x_sqrt)
# sample_ndims = ndims - batch_ndims - event_ndims
sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
sample_shape = array_ops.strided_slice(
array_ops.shape(x_sqrt), [0], [sample_ndims])
# We need to be able to pre-multiply each matrix by its corresponding
# batch scale matrix. Since a Distribution Tensor supports multiple
# samples per batch, this means we need to reshape the input matrix `x`
# so that the first b dimensions are batch dimensions and the last two
# are of shape [dimension, dimensions*number_of_samples]. Doing these
# gymnastics allows us to do a batch_solve.
#
# After we're done with sqrt_solve (the batch operation) we need to undo
# this reshaping so what we're left with is a Tensor partitionable by
# sample, batch, event dimensions.
# Complexity: O(nbk**2) since transpose must access every element.
scale_sqrt_inv_x_sqrt = x_sqrt
perm = array_ops.concat([math_ops.range(sample_ndims, ndims),
math_ops.range(0, sample_ndims)], 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
shape = array_ops.concat(
(batch_shape, (math_ops.cast(
self.dimension, dtype=dtypes.int32), -1)),
0)
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
# Complexity: O(nbM*k) where M is the complexity of the operator solving
# a vector system. E.g., for LinearOperatorDiag, each solve is O(k), so
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each solve is O(k**2) so this step has complexity O(nbk^3).
scale_sqrt_inv_x_sqrt = self.scale_operator.solve(
scale_sqrt_inv_x_sqrt)
# Undo make batch-op ready.
# Complexity: O(nbk**2)
shape = array_ops.concat([batch_shape, event_shape, sample_shape], 0)
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
perm = array_ops.concat([math_ops.range(ndims - sample_ndims, ndims),
math_ops.range(0, ndims - sample_ndims)], 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
# Write V = SS', X = LL'. Then:
# tr[inv(V) X] = tr[inv(S)' inv(S) L L']
# = tr[inv(S) L L' inv(S)']
# = tr[(inv(S) L) (inv(S) L)']
# = sum_{ik} (inv(S) L)_{ik}**2
# The second equality follows from the cyclic permutation property.
# Complexity: O(nbk**2)
trace_scale_inv_x = math_ops.reduce_sum(
math_ops.square(scale_sqrt_inv_x_sqrt),
axis=[-2, -1])
# Complexity: O(nbk)
half_log_det_x = math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(x_sqrt)),
axis=[-1])
# Complexity: O(nbk**2)
log_prob = ((self.df - self.dimension - 1.) * half_log_det_x -
0.5 * trace_scale_inv_x -
self.log_normalization())
# Set shape hints.
# Try to merge what we know from the input then what we know from the
# parameters of this distribution.
if x.get_shape().ndims is not None:
log_prob.set_shape(x.get_shape()[:-2])
if (log_prob.get_shape().ndims is not None and
self.batch_shape.ndims is not None and
self.batch_shape.ndims > 0):
log_prob.get_shape()[-self.batch_shape.ndims:].merge_with(
self.batch_shape)
return log_prob
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _entropy(self):
half_dp1 = 0.5 * self.dimension + 0.5
half_df = 0.5 * self.df
return (self.dimension * (half_df + half_dp1 * math.log(2.)) +
2 * half_dp1 * self.scale_operator.log_abs_determinant() +
self._multi_lgamma(half_df, self.dimension) +
(half_dp1 - half_df) * self._multi_digamma(half_df, self.dimension))
def _mean(self):
if self.cholesky_input_output_matrices:
return (math_ops.sqrt(self.df)
* self.scale_operator.to_dense())
return self.df * self._square_scale_operator()
def _variance(self):
x = math_ops.sqrt(self.df) * self._square_scale_operator()
d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
v = math_ops.square(x) + math_ops.matmul(d, d, adjoint_b=True)
if self.cholesky_input_output_matrices:
return linalg_ops.cholesky(v)
return v
def _stddev(self):
if self.cholesky_input_output_matrices:
raise ValueError(
"Computing std. dev. when is cholesky_input_output_matrices=True "
"does not make sense.")
return linalg_ops.cholesky(self.variance())
def _mode(self):
s = self.df - self.dimension - 1.
s = array_ops.where(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator.to_dense()
return s * self._square_scale_operator()
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
2 * self.scale_operator.log_abs_determinant())
def log_normalization(self, name="log_normalization"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator.log_abs_determinant() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
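# Added note (not original code): with scale = S S' where S is scale_operator,
#   log(Z) = 0.5 * df * log|det(scale)| + 0.5 * df * k * log(2) + log(Gamma_k(0.5 * df))
#          = df * log|det(S)|           + 0.5 * df * k * log(2) + log(Gamma_k(0.5 * df)),
# which matches the expression returned by log_normalization above and the Z
# given in the class docstring.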
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name, values=[a, p]):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = math_ops.linspace(
constant_op.constant(0., dtype=self.dtype),
0.5 - 0.5 * p,
math_ops.cast(p, dtypes.int32))
return seq + array_ops.expand_dims(a, [-1])
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
math_ops.reduce_sum(math_ops.lgamma(seq),
axis=[-1]))
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return math_ops.reduce_sum(math_ops.digamma(seq),
axis=[-1])
class WishartCholesky(_WishartLinearOperator):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
lower, triangular Cholesky factor which characterizes the scale matrix.
Using WishartCholesky is a constant-time improvement over WishartFull. It
saves an O(nbk^3) operation, i.e., a matrix-product operation for sampling
and a Cholesky factorization in log_prob. For most use-cases it often saves
another O(nbk^3) operation since most uses of Wishart will also use the
Cholesky factorization.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3x3 Wishart with Cholesky factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
chol_scale = tf.cholesky(...) # Shape is [3, 3].
dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on an observation in R^3, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.prob(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.prob(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Cholesky factored scale matrices.
df = [5, 4]
chol_scale = tf.cholesky(...) # Shape is [2, 3, 3].
dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3].
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tfp.distributions.matrix_diag_transform.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartCholesky"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The Cholesky factorization of
the symmetric positive definite scale matrix of the distribution.
cholesky_input_output_matrices: Python `bool`. When `True`, any function
whose input or output is a matrix assumes the input is a Cholesky factor
and returns a Cholesky-factored matrix. For example, `log_prob` takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
scale = control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(scale),
message="scale must be positive definite"),
check_ops.assert_equal(
array_ops.shape(scale)[-1],
array_ops.shape(scale)[-2],
message="scale must be square")
] if validate_args else [], scale)
super(WishartCholesky, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=scale,
is_non_singular=True,
is_positive_definite=True,
is_square=True),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
class WishartFull(_WishartLinearOperator):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
symmetric, positive definite scale matrix.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations
where `(k, k)` is the event space shape.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3x3 Wishart with Full factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
scale = ... # Shape is [3, 3]; positive definite.
dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on an observation in R^3, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.prob(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.prob(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Full factored scale matrices.
df = [5, 4]
scale = ... # Shape is [2, 3, 3].
dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3]; xi is positive definite.
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tfd.matrix_diag_transform.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartFull"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The symmetric positive definite
scale matrix of the distribution.
cholesky_input_output_matrices: Python `bool`. When `True`, any function
whose input or output is a matrix assumes the input is a Cholesky factor
and returns a Cholesky-factored matrix. For example, `log_prob` takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
scale = distribution_util.assert_symmetric(scale)
chol = linalg_ops.cholesky(scale)
chol = control_flow_ops.with_dependencies([
check_ops.assert_positive(array_ops.matrix_diag_part(chol))
] if validate_args else [], chol)
super(WishartFull, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=chol,
is_non_singular=True,
is_positive_definite=True,
is_square=True),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
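# --- Editor's illustration (hedged; not part of the original module) ---------
# The class docstring above gives pdf(X; df, scale) in closed form.  As a
# minimal, standalone cross-check of that formula -- assuming only NumPy and
# SciPy, which this module does not otherwise depend on -- the log-density can
# be written directly:
def _wishart_log_pdf_sketch(x, df, scale):
    """Illustrative only: log of the Wishart density from the docstring formula."""
    import numpy as np
    from scipy.special import multigammaln
    k = x.shape[-1]
    _, logdet_x = np.linalg.slogdet(x)
    _, logdet_scale = np.linalg.slogdet(scale)
    # log Z = 0.5*df*k*log(2) + 0.5*df*log|scale| + log Gamma_k(0.5*df)
    log_z = (0.5 * df * k * np.log(2.) + 0.5 * df * logdet_scale
             + multigammaln(0.5 * df, k))
    # log pdf = 0.5*(df - k - 1)*log|X| - 0.5*tr[inv(scale) X] - log Z
    return (0.5 * (df - k - 1) * logdet_x
            - 0.5 * np.trace(np.linalg.solve(scale, x)) - log_z)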
|
|
"""Support for Acrobat Forms in ReportLab documents
This module is somewhat experimental at this time.
Includes basic support for
textfields,
select fields (drop down lists), and
check buttons.
The public interface consists of functions at the moment.
At some later date these operations may be made into canvas
methods. (comments?)
The ...Absolute(...) functions position the fields with respect
to the absolute canvas coordinate space -- that is, they do not
respect any coordinate transforms in effect for the canvas.
The ...Relative(...) functions position ONLY THE LOWER LEFT
CORNER of the field using the coordinate transform in effect for
the canvas. THIS WILL ONLY WORK CORRECTLY FOR TRANSLATED COORDINATES
-- THE SHAPE, SIZE, FONTSIZE, AND ORIENTATION OF THE FIELD WILL NOT BE AFFECTED
BY SCALING, ROTATION, SKEWING OR OTHER NON-TRANSLATION COORDINATE
TRANSFORMS.
Please note that all field names (titles) in a given document must be unique.
Textfields and select fields only support the "base 14" canvas fonts
at this time.
See individual function docstrings below for more information.
The function test1(...) generates a simple test file.
THIS CONTRIBUTION WAS COMMISSIONED BY REPORTLAB USERS
WHO WISH TO REMAIN ANONYMOUS.
"""
### NOTE: MAKE THE STRING FORMATS DYNAMIC IN PATTERNS TO SUPPORT ENCRYPTION XXXX
import string
from reportlab.pdfbase.pdfdoc import LINEEND, PDFString, PDFStream, PDFDictionary, PDFName
from reportlab.lib.colors import obj_R_G_B
#==========================public interfaces
def textFieldAbsolute(canvas, title, x, y, width, height, value="", maxlen=1000000, multiline=0):
"""Place a text field on the current page
with name title at ABSOLUTE position (x,y) with
dimensions (width, height), using value as the default value and
maxlen as the maximum permissible length. If multiline is set make
it a multiline field.
"""
theform = getForm(canvas)
return theform.textField(canvas, title, x, y, x+width, y+height, value, maxlen, multiline)
def textFieldRelative(canvas, title, xR, yR, width, height, value="", maxlen=1000000, multiline=0):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return textFieldAbsolute(canvas, title, xA, yA, width, height, value, maxlen, multiline)
def buttonFieldAbsolute(canvas, title, value, x, y):
"""Place a check button field on the current page
with name title and default value value (one of "Yes" or "Off")
at ABSOLUTE position (x,y).
"""
theform = getForm(canvas)
return theform.buttonField(canvas, title, value, x, y)
def buttonFieldRelative(canvas, title, value, xR, yR):
"same as buttonFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return buttonFieldAbsolute(canvas, title, value, xA, yA)
def selectFieldAbsolute(canvas, title, value, options, x, y, width, height):
"""Place a select field (drop down list) on the current page
with name title and
with options listed in the sequence options
default value value (must be one of options)
at ABSOLUTE position (x,y) with dimensions (width, height)."""
theform = getForm(canvas)
theform.selectField(canvas, title, value, options, x, y, x+width, y+height)
def selectFieldRelative(canvas, title, value, options, xR, yR, width, height):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return selectFieldAbsolute(canvas, title, value, options, xA, yA, width, height)
def test1():
from reportlab.pdfgen import canvas
fn = "formtest1.pdf"
c = canvas.Canvas(fn)
# first page
c.setFont("Courier", 10)
c.drawString(100, 500, "hello world")
textFieldAbsolute(c, "fieldA", 100, 600, 100, 20, "default value")
textFieldAbsolute(c, "fieldB", 100, 300, 100, 50, "another default value", multiline=1)
selectFieldAbsolute(c, "fieldC", "France", ["Canada", "France", "China"], 100, 200, 100, 20)
c.rect(100, 600, 100, 20)
buttonFieldAbsolute(c, "field2", "Yes", 100, 700)
c.rect(100, 700, 20, 20)
buttonFieldAbsolute(c, "field3", "Off", 100, 800)
c.rect(100, 800, 20, 20)
# second page
c.showPage()
c.setFont("Helvetica", 7)
c.translate(50, 20)
c.drawString(100, 500, "hello world")
textFieldRelative(c, "fieldA_1", 100, 600, 100, 20, "default value 2")
c.setStrokeColorRGB(1,0,0)
c.setFillColorRGB(0,1,0.5)
textFieldRelative(c, "fieldB_1", 100, 300, 100, 50, "another default value 2", multiline=1)
selectFieldRelative(c, "fieldC_1", "France 1", ["Canada 0", "France 1", "China 2"], 100, 200, 100, 20)
c.rect(100, 600, 100, 20)
buttonFieldRelative(c, "field2_1", "Yes", 100, 700)
c.rect(100, 700, 20, 20)
buttonFieldRelative(c, "field3_1", "Off", 100, 800)
c.rect(100, 800, 20, 20)
c.save()
print "wrote", fn
#==========================end of public interfaces
from pdfpattern import PDFPattern
def getForm(canvas):
"get form from canvas, create the form if needed"
try:
return canvas.AcroForm
except AttributeError:
theform = canvas.AcroForm = AcroForm()
# install the form in the document
d = canvas._doc
cat = d._catalog
cat.AcroForm = theform
return theform
class AcroForm:
def __init__(self):
self.fields = []
def textField(self, canvas, title, xmin, ymin, xmax, ymax, value="", maxlen=1000000, multiline=0):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
font = canvas._fontname
fontsize = canvas._fontsize
field = TextField(title, value, xmin, ymin, xmax, ymax, page, maxlen,
font, fontsize, R, G, B, multiline)
self.fields.append(field)
canvas._addAnnotation(field)
def selectField(self, canvas, title, value, options, xmin, ymin, xmax, ymax):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
font = canvas._fontname
fontsize = canvas._fontsize
field = SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font=font, fontsize=fontsize, R=R, G=G, B=B)
self.fields.append(field)
canvas._addAnnotation(field)
def buttonField(self, canvas, title, value, xmin, ymin):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
field = ButtonField(title, value, xmin, ymin, page)
self.fields.append(field)
canvas._addAnnotation(field)
def format(self, document):
from reportlab.pdfbase.pdfdoc import PDFArray
proxy = PDFPattern(FormPattern, Resources=GLOBALRESOURCES, fields=PDFArray(self.fields))
return proxy.format(document)
FormPattern = [
'<<', LINEEND,
' /NeedAppearances true ', LINEEND,
' /DA ', PDFString('/Helv 0 Tf 0 g '), LINEEND,
' /DR ', LINEEND,
["Resources"],
' /Fields ', LINEEND,
["fields"],
'>>'
]
def FormFontsDictionary():
from reportlab.pdfbase.pdfdoc import PDFDictionary
fontsdictionary = PDFDictionary()
fontsdictionary.__RefOnly__ = 1
for (fullname, shortname) in FORMFONTNAMES.items():
fontsdictionary[shortname] = FormFont(fullname, shortname)
fontsdictionary["ZaDb"] = ZADB
return fontsdictionary
def FormResources():
return PDFPattern(FormResourcesDictionaryPattern,
Encoding=ENCODING, Font=GLOBALFONTSDICTIONARY)
ZaDbPattern = [
' <<'
' /BaseFont'
' /ZapfDingbats'
' /Name'
' /ZaDb'
' /Subtype'
' /Type1'
' /Type'
' /Font'
'>>']
ZADB = PDFPattern(ZaDbPattern)
FormResourcesDictionaryPattern = [
'<<',
' /Encoding ',
["Encoding"], LINEEND,
' /Font ',
["Font"], LINEEND,
'>>'
]
FORMFONTNAMES = {
"Helvetica": "Helv",
"Helvetica-Bold": "HeBo",
'Courier': "Cour",
'Courier-Bold': "CoBo",
'Courier-Oblique': "CoOb",
'Courier-BoldOblique': "CoBO",
'Helvetica-Oblique': "HeOb",
'Helvetica-BoldOblique': "HeBO",
'Times-Roman': "Time",
'Times-Bold': "TiBo",
'Times-Italic': "TiIt",
'Times-BoldItalic': "TiBI",
}
EncodingPattern = [
'<<',
' /PDFDocEncoding ',
["PDFDocEncoding"], LINEEND,
'>>',
]
PDFDocEncodingPattern = [
'<<'
' /Differences'
' ['
' 24'
' /breve'
' /caron'
' /circumflex'
' /dotaccent'
' /hungarumlaut'
' /ogonek'
' /ring'
' /tilde'
' 39'
' /quotesingle'
' 96'
' /grave'
' 128'
' /bullet'
' /dagger'
' /daggerdbl'
' /ellipsis'
' /emdash'
' /endash'
' /florin'
' /fraction'
' /guilsinglleft'
' /guilsinglright'
' /minus'
' /perthousand'
' /quotedblbase'
' /quotedblleft'
' /quotedblright'
' /quoteleft'
' /quoteright'
' /quotesinglbase'
' /trademark'
' /fi'
' /fl'
' /Lslash'
' /OE'
' /Scaron'
' /Ydieresis'
' /Zcaron'
' /dotlessi'
' /lslash'
' /oe'
' /scaron'
' /zcaron'
' 160'
' /Euro'
' 164'
' /currency'
' 166'
' /brokenbar'
' 168'
' /dieresis'
' /copyright'
' /ordfeminine'
' 172'
' /logicalnot'
' /.notdef'
' /registered'
' /macron'
' /degree'
' /plusminus'
' /twosuperior'
' /threesuperior'
' /acute'
' /mu'
' 183'
' /periodcentered'
' /cedilla'
' /onesuperior'
' /ordmasculine'
' 188'
' /onequarter'
' /onehalf'
' /threequarters'
' 192'
' /Agrave'
' /Aacute'
' /Acircumflex'
' /Atilde'
' /Adieresis'
' /Aring'
' /AE'
' /Ccedilla'
' /Egrave'
' /Eacute'
' /Ecircumflex'
' /Edieresis'
' /Igrave'
' /Iacute'
' /Icircumflex'
' /Idieresis'
' /Eth'
' /Ntilde'
' /Ograve'
' /Oacute'
' /Ocircumflex'
' /Otilde'
' /Odieresis'
' /multiply'
' /Oslash'
' /Ugrave'
' /Uacute'
' /Ucircumflex'
' /Udieresis'
' /Yacute'
' /Thorn'
' /germandbls'
' /agrave'
' /aacute'
' /acircumflex'
' /atilde'
' /adieresis'
' /aring'
' /ae'
' /ccedilla'
' /egrave'
' /eacute'
' /ecircumflex'
' /edieresis'
' /igrave'
' /iacute'
' /icircumflex'
' /idieresis'
' /eth'
' /ntilde'
' /ograve'
' /oacute'
' /ocircumflex'
' /otilde'
' /odieresis'
' /divide'
' /oslash'
' /ugrave'
' /uacute'
' /ucircumflex'
' /udieresis'
' /yacute'
' /thorn'
' /ydieresis'
' ]'
' /Type'
' /Encoding'
'>>']
# global constant
PDFDOCENC = PDFPattern(PDFDocEncodingPattern)
# global constant
ENCODING = PDFPattern(EncodingPattern, PDFDocEncoding=PDFDOCENC)
def FormFont(BaseFont, Name):
from reportlab.pdfbase.pdfdoc import PDFName
return PDFPattern(FormFontPattern, BaseFont=PDFName(BaseFont), Name=PDFName(Name), Encoding=PDFDOCENC)
FormFontPattern = [
'<<',
' /BaseFont ',
["BaseFont"], LINEEND,
' /Encoding ',
["Encoding"], LINEEND,
' /Name ',
["Name"], LINEEND,
' /Subtype '
' /Type1 '
' /Type '
' /Font '
'>>' ]
# global constants
GLOBALFONTSDICTIONARY = FormFontsDictionary()
GLOBALRESOURCES = FormResources()
def TextField(title, value, xmin, ymin, xmax, ymax, page,
maxlen=1000000, font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627, multiline=0):
from reportlab.pdfbase.pdfdoc import PDFString, PDFName
Flags = 0
if multiline:
Flags = Flags | (1<<12) # bit 13 is at position 12 :)
fontname = FORMFONTNAMES[font]
return PDFPattern(TextFieldPattern,
value=PDFString(value), maxlen=maxlen, page=page,
title=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B, Flags=Flags)
TextFieldPattern = [
'<<'
' /DA'
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)'
' /DV ',
["value"], LINEEND,
' /F 4 /FT /Tx'
'/MK << /BC [ 0 0 0 ] >>'
' /MaxLen ',
["maxlen"], LINEEND,
' /P ',
["page"], LINEEND,
' /Rect '
' [', ["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"], ' ]'
'/Subtype /Widget'
' /T ',
["title"], LINEEND,
' /Type'
' /Annot'
' /V ',
["value"], LINEEND,
' /Ff ',
["Flags"],LINEEND,
'>>']
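# --- Editor's note (hedged) ---------------------------------------------------
# TextField above sets bit 13 (1 << 12) of the /Ff entry for multiline fields.
# For orientation, a few other field-flag bits from the PDF specification are
# sketched below; the names are illustrative only and nothing in this module
# uses them.
FF_READONLY_SKETCH = 1 << 0    # bit 1: field is read-only
FF_REQUIRED_SKETCH = 1 << 1    # bit 2: field must have a value when submitted
FF_MULTILINE_SKETCH = 1 << 12  # bit 13: multiline text field (used above)
FF_PASSWORD_SKETCH = 1 << 13   # bit 14: password-style text entry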
def SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627):
#print "ARGS", (title, value, options, xmin, ymin, xmax, ymax, page, font, fontsize, R, G, B)
from reportlab.pdfbase.pdfdoc import PDFString, PDFName, PDFArray
if value not in options:
raise ValueError, "value %s must be one of options %s" % (repr(value), repr(options))
fontname = FORMFONTNAMES[font]
optionstrings = map(PDFString, options)
optionarray = PDFArray(optionstrings)
return PDFPattern(SelectFieldPattern,
Options=optionarray,
Selected=PDFString(value), Page=page,
Name=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B)
SelectFieldPattern = [
'<< % a select list',LINEEND,
' /DA ',
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)',LINEEND,
#' (/Helv 12 Tf 0 g)',LINEEND,
' /DV ',
["Selected"],LINEEND,
' /F ',
' 4',LINEEND,
' /FT ',
' /Ch',LINEEND,
' /MK ',
' <<',
' /BC',
' [',
' 0',
' 0',
' 0',
' ]',
' /BG',
' [',
' 1',
' 1',
' 1',
' ]',
' >>',LINEEND,
' /Opt ',
["Options"],LINEEND,
' /P ',
["Page"],LINEEND,
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] ',LINEEND,
'/Subtype',
' /Widget',LINEEND,
' /T ',
["Name"],LINEEND,
' /Type ',
' /Annot',
' /V ',
["Selected"],LINEEND,
'>>']
def ButtonField(title, value, xmin, ymin, page):
if value not in ("Yes", "Off"):
raise ValueError, "button value must be 'Yes' or 'Off': "+repr(value)
(dx, dy) = (16.77036, 14.90698)
return PDFPattern(ButtonFieldPattern,
Name=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmin+dx, ymax=ymin+dy,
Hide=HIDE,
APDOff=APDOFF,
APDYes=APDYES,
APNYes=APNYES,
Value=PDFName(value),
Page=page)
ButtonFieldPattern = ['<< ',
'/AA',
' <<',
' /D ',
["Hide"], LINEEND,
#' %(imported.18.0)s',
' >> ',
'/AP ',
' <<',
' /D',
' <<',
' /Off ',
#' %(imported.40.0)s',
["APDOff"], LINEEND,
' /Yes ',
#' %(imported.39.0)s',
["APDYes"], LINEEND,
' >>', LINEEND,
' /N',
' << ',
' /Yes ',
#' %(imported.38.0)s',
["APNYes"], LINEEND,
' >>',
' >>', LINEEND,
' /AS ',
["Value"], LINEEND,
' /DA ',
PDFString('/ZaDb 0 Tf 0 g'), LINEEND,
'/DV ',
["Value"], LINEEND,
'/F ',
' 4 ',
'/FT ',
' /Btn ',
'/H ',
' /T ',
'/MK ',
' <<',
' /AC (\\376\\377)',
#PDFString('\376\377'),
' /CA ',
PDFString('4'),
' /RC ',
PDFString('\376\377'),
' >> ',LINEEND,
'/P ',
["Page"], LINEEND,
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] ',LINEEND,
'/Subtype',
' /Widget ',
'/T ',
["Name"], LINEEND,
'/Type',
' /Annot ',
'/V ',
["Value"], LINEEND,
' >>']
HIDE = PDFPattern([
'<< '
'/S '
' /Hide '
'>>'])
def buttonStreamDictionary():
"everything except the length for the button appearance streams"
result = PDFDictionary()
result["SubType"] = "/Form"
result["BBox"] = "[0 0 16.77036 14.90698]"
font = PDFDictionary()
font["ZaDb"] = ZADB
resources = PDFDictionary()
resources["ProcSet"] = "[ /PDF /Text ]"
resources["Font"] = font
result["Resources"] = resources
return result
def ButtonStream(content):
dict = buttonStreamDictionary()
result = PDFStream(dict, content)
result.filters = []
return result
APDOFF = ButtonStream('0.749 g 0 0 16.7704 14.907 re f'+LINEEND)
APDYES = ButtonStream(
'0.749 g 0 0 16.7704 14.907 re f q 1 1 14.7704 12.907 re W '+
'n BT /ZaDb 11.3086 Tf 0 g 1 0 0 1 3.6017 3.3881 Tm (4) Tj ET'+LINEEND)
APNYES = ButtonStream(
'q 1 1 14.7704 12.907 re W n BT /ZaDb 11.3086 Tf 0 g 1 0 0 1 3.6017 3.3881 Tm (4) Tj ET Q'+LINEEND)
#==== script interpretation
if __name__=="__main__":
test1()
|
|
## CSC320 Winter 2016
## Assignment 2
## (c) Kyros Kutulakos
##
## DISTRIBUTION OF THIS CODE IN ANY FORM (ELECTRONIC OR OTHERWISE,
## AS-IS, MODIFIED OR IN PART), WITHOUT PRIOR WRITTEN AUTHORIZATION
## BY THE INSTRUCTOR IS STRICTLY PROHIBITED. VIOLATION OF THIS
## POLICY WILL BE CONSIDERED AN ACT OF ACADEMIC DISHONESTY
##
## DO NOT MODIFY THIS FILE
##
## You do NOT need to look at this file for your A2 implementation
##
## This file defines a single class called RootWidget that
## controls the behavior of the RootWidget defined in the
## file viscomp.kv.
##
## Its purpose is two-fold:
## (a) It isolates all functions called by the widgets in viscomp.kv
## from all other code
## (b) It defines all the routines required for the GUI to operate
## correctly
##
import kivy
kivy.require('1.9.1')
from kivy import Config
# disable fullscreen mode
Config.set('graphics','fullscreen','0')
# do not allow window resizing
Config.set('graphics','resizable','0')
from kivy.app import App
from kivy.uix.label import Label
from kivy.properties import StringProperty
from kivy.properties import ObjectProperty
from kivy.graphics import *
from kivy.input.postproc.doubletap import *
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.logger import Logger
from control import InpaintingControl
#
# The class that defines the whole GUI application.
# This class is created immediately when the program
# is run. Then control is passed to the program's
# main() function which must ultimately call
# its run() method to display all windows and enter
# the GUI's main loop.
#
class VisCompApp(App):
def build(self):
'''This method loads the VisComp.kv file automatically
:rtype: none
'''
# This tells Kivy's window- and widget-building
# routine where to find the .kv file that specifies
# all the widgets to be created
try:
filename = 'kv/viscomp.kv'
# loading the content of viscomp.kv and building its widgets
self.root = Builder.load_file(filename)
except Exception as e:
Logger.exception('VisComp: Unable to load <%s>' % filename)
def on_pause(self):
return True
#
# Class definitions for the two dialog box widgets
# used by the GUI, for opening and saving files, respectively
# These are taken from Kivy's RST_Editor tutorial example
#
class LoadDialog(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
class SaveDialog(FloatLayout):
save = ObjectProperty(None)
text_input = ObjectProperty(None)
cancel = ObjectProperty(None)
class DebugDialog(FloatLayout):
patch_radius_callback = ObjectProperty(None)
show_patch_callback = ObjectProperty(None)
show_vectors_callback = ObjectProperty(None)
max_iterations_callback = ObjectProperty(None)
verbose_callback = ObjectProperty(None)
show_intensities_callback = ObjectProperty(None)
patch_radius = ObjectProperty(None)
max_iterations = ObjectProperty(None)
show_patch = ObjectProperty(None)
show_vectors = ObjectProperty(None)
show_intensities = ObjectProperty(None)
verbose = ObjectProperty(None)
#
# Class that controls the complete GUI. It is derived from
# Kivy's built-in FloatLayout widget class
class RootWidget(FloatLayout):
# Create an instance of the InpaintingControl class that will
# take care of all functionalities related to inpainting
inpaintingControl = InpaintingControl()
savefile = ObjectProperty(None)
text_input = ObjectProperty(None)
#
# All the methods below are called by widgets of viscomp.kv
#
# Switch the GUI's current mode
def next_mode(self):
# change the mode of the inpainting class
self.inpaintingControl.nextMode()
# update the variable holding the current mode's descriptive text
self.modeText = self.currentModeMsg()
# update the variable holding the current image's descriptive text
self.imageText = self.currentImageMsg()
# display the current image in the imview widget
self.display_current_image()
# Switch the GUI's current image
def next_image(self):
# change the current image of the inpainting class
self.inpaintingControl.nextImage()
# update the variable holding the current mode's descriptive text
self.imageText = self.currentImageMsg()
# display the current image in the imview widget
self.display_current_image()
# Run the algorithm associated with the GUI's current mode
def run_algorithm(self):
# the algorithm is executed by calling the method of
# the InpaintingControl class
ok, msg = self.inpaintingControl.runAlgorithm(self.ids.imviewer)
if not ok:
# if the method returned an error, display a popup
# with an error message
self.show_error_popup(self.currentModeMsg(), msg)
else:
# if there were no errors, display the current image
# we do this to ensure that if the currently-displayed
# image is an output image of the algorithm, the image is
# updated with the algorithm's results
self.display_current_image()
return
# Run the algorithm associated with the GUI's current mode for ONE iteration
def step_algorithm(self):
# the algorithm is executed by calling the method of
# the InpaintingControl class
ok, msg = self.inpaintingControl.runAlgorithm(self.ids.imviewer, maxIterations=1)
if not ok:
# if the method returned an error, display a popup
# with an error message
self.show_error_popup(self.currentModeMsg(), msg)
else:
# if there were no errors, display the current image
# we do this to ensure that if the currently-displayed
# image is an output image of the algorithm, the image is
# updated with the algorithm's results
self.display_current_image()
return
# These methods simply call the associated routine of the
# inpaintingControl class to get the descriptive strings to be
# displayed by the GUI's various buttons
def currentModeMsg(self):
return self.inpaintingControl.currentModeMsg()
def currentImageMsg(self):
return self.inpaintingControl.currentImageMsg()
def currentFileActionMsg(self):
return self.inpaintingControl.currentFileActionMsg()
# Method to update the image displayed by the imviewer widget
def display_current_image(self):
# first we get the OpenCV image associated with the GUI's
# current image
currentOpenCVImage = self.inpaintingControl.imageData()
# we also get the name of the image
currentOpenCVImageName = self.inpaintingControl.imageName()
# then we call the imviewer's display routine to display it
self.ids.imviewer.display_opencv_image(im=currentOpenCVImage, name=currentOpenCVImageName)
# Method to display a small popup window showing an error message
# The method expects a title for the popup as well as a message
def show_error_popup(self, title, message):
try:
content = Label(text=message)
self._popup = Popup(title=title, content=content,
size_hint=(0.9, None))
self._popup.open()
except Exception as e:
Logger.exception('VisComp: Error %s' %message)
# Method to close a popup that is currently shown on screen
def dismiss_error_popup(self):
self._popup.dismiss()
show_vectors_callback = ObjectProperty(None)
show_intensities_callback = ObjectProperty(None)
# Routine to display the widgets for controlling the debug display
def show_debug(self):
content = DebugDialog(
patch_radius_callback=self.inpaintingControl.setPatchRadius,
show_patch_callback=self.inpaintingControl.debug.setShowPatch,
show_vectors_callback=self.inpaintingControl.debug.setShowVectors,
max_iterations_callback=self.inpaintingControl.setMaxIterations,
verbose_callback=self.inpaintingControl.debug.setVerbose,
show_intensities_callback=
self.inpaintingControl.debug.setShowIntensities,
patch_radius = self.inpaintingControl.patchRadius(),
max_iterations = self.inpaintingControl.maxIterations(),
show_patch = self.inpaintingControl.debug.showPatch(),
show_vectors = self.inpaintingControl.debug.showVectors(),
verbose = self.inpaintingControl.debug.verbose(),
show_intensities = self.inpaintingControl.debug.showIntensities())
self._debug = Popup(title="Debug Display Control", content=content, auto_dismiss=True, size_hint=(0.9, 0.9))
self._debug.open()
#print self._debug.ids.container.children[0].ids.show_vectors.active
# Routine to display the dialog box for selecting an image file for
# opening/writing
def show_dialog(self):
if self.inpaintingControl.isInputImage():
content = LoadDialog(load=self.load, cancel=self.dismiss_error_popup)
self._popup = Popup(title="Open Image", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
elif self.inpaintingControl.isOutputImage():
content = SaveDialog(save=self.save, cancel=self.dismiss_error_popup)
self._popup = Popup(title="Save Image", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
# Lower-level routines for loading and saving an image
def _loadsave(self, filename, func, s):
if len(filename)<=0:
return
ok, msg = func(filename)
if not ok:
title = 'Error %s Image'%s
self.show_error_popup(title, msg)
else:
self.display_current_image()
self.dismiss_error_popup()
def load(self, path, filenameList):
s = 'Opening'
if filenameList is not None:
self._loadsave(filenameList[0], self.inpaintingControl.load, s)
def save(self, path, filename):
s = 'Saving'
if filename is not None:
self._loadsave(filename, self.inpaintingControl.save, s)
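# --- Editor's illustration (hedged; not part of the assignment code) ----------
# The comments near the top of this file say that the program's main() creates
# the VisCompApp instance and ultimately calls its run() method.  A minimal
# sketch of such an entry point, under that assumption, is shown below; it only
# executes when this module is run directly, which the assignment normally
# does not do.
if __name__ == '__main__':
    VisCompApp().run()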
|
|
from __future__ import print_function
import os
import glob
import json
import logging
import datetime
from time import time
from collections import defaultdict
from billy.core import settings, db
from billy.utils import (metadata, term_for_session, fix_bill_id,
JSONEncoderPlus)
from billy.importers.names import get_legislator_id
from billy.importers.filters import apply_filters
from billy.importers.subjects import SubjectCategorizer
from billy.importers.utils import (insert_with_id, update, prepare_obj,
next_big_id, get_committee_id)
if hasattr(settings, "ENABLE_GIT") and settings.ENABLE_GIT:
from dulwich.repo import Repo
from dulwich.objects import Blob
from dulwich.objects import Tree
from dulwich.objects import Commit, parse_timezone
filters = settings.BILL_FILTERS
logger = logging.getLogger('billy')
def match_sponsor_ids(abbr, bill):
for sponsor in bill['sponsors']:
# use sponsor's chamber if specified
sponsor['leg_id'] = get_legislator_id(abbr, bill['session'],
sponsor.get('chamber',
bill['chamber']),
sponsor['name'])
if sponsor['leg_id'] is None:
sponsor['leg_id'] = get_legislator_id(abbr, bill['session'], None,
sponsor['name'])
if sponsor['leg_id'] is None:
sponsor['committee_id'] = get_committee_id(abbr, bill['chamber'],
sponsor['name'])
def load_standalone_votes(data_dir):
pattern = os.path.join(data_dir, 'votes', '*.json')
paths = glob.glob(pattern)
votes = defaultdict(list)
for path in paths:
with open(path) as f:
data = prepare_obj(json.load(f))
# need to match bill_id already in the database
bill_id = fix_bill_id(data.pop('bill_id'))
votes[(data['bill_chamber'], data['session'], bill_id)].append(data)
logger.info('imported %s vote files' % len(paths))
return votes
git_active_repo = None
git_active_commit = None
git_active_tree = None
git_old_tree = None
HEAD = None
def git_add_bill(data):
if not hasattr(settings, "ENABLE_GIT") or not settings.ENABLE_GIT:
return
global git_active_repo
global git_active_tree
global git_active_commit
bill = json.dumps(data, cls=JSONEncoderPlus, sort_keys=True, indent=4)
spam = Blob.from_string(bill)
bid = str(data['_id'])
git_active_repo.object_store.add_object(spam)
# 0100644 octal -> 33188 decimal
git_active_tree[bid] = (33188, spam.id)
git_active_tree.check()
print("added %s - %s" % (data['_id'], spam.id))
def git_commit(message):
if not hasattr(settings, "ENABLE_GIT") or not settings.ENABLE_GIT:
return
print("Commiting import as '%s'" % message)
global git_active_repo
global git_active_tree
global git_old_tree
global git_active_commit
global HEAD
repo = git_active_repo
if git_old_tree == git_active_tree.id:
# We don't want to commit twice.
print("Nothing new here. Bailing out.")
return
c = git_active_commit
c.tree = git_active_tree.id
c.parents = [HEAD]
repo.object_store.add_object(git_active_tree)
c.author = c.committer = "Billy <billy@localhost>"
c.commit_time = c.author_time = int(time())
tz = parse_timezone("-0400")[0]
c.commit_timezone = c.author_timezone = tz
c.encoding = "UTF-8"
c.message = message
repo.object_store.add_object(c)
repo.refs['refs/heads/master'] = c.id
def git_repo_init(gitdir):
os.mkdir(gitdir)
repo = Repo.init_bare(gitdir)
blob = Blob.from_string("""Why, Hello there!
This is your friendly Legislation tracker, Billy here.
This is a git repo full of everything I write to the DB. This isn't super
useful unless you're debugging production issues.
Fondly,
Bill, your local Billy instance.""")
tree = Tree()
tree.add("README", 33188, blob.id)
commit = Commit()
commit.tree = tree.id
author = "Billy <billy@localhost>"
commit.author = commit.committer = author
commit.commit_time = commit.author_time = int(time())
tz = parse_timezone('-0400')[0]
commit.commit_timezone = commit.author_timezone = tz
commit.encoding = "UTF-8"
commit.message = "Initial commit"
repo.object_store.add_object(blob)
repo.object_store.add_object(tree)
repo.object_store.add_object(commit)
repo.refs['refs/heads/master'] = commit.id
def git_prelod(abbr):
if not hasattr(settings, "ENABLE_GIT") or not settings.ENABLE_GIT:
return
global git_active_repo
global git_active_commit
global git_active_tree
global git_old_tree
global HEAD
gitdir = "%s/%s.git" % (settings.GIT_PATH, abbr)
if not os.path.exists(gitdir):
git_repo_init(gitdir)
git_active_repo = Repo(gitdir)
git_active_commit = Commit()
HEAD = git_active_repo.head()
commit = git_active_repo.commit(HEAD)
tree = git_active_repo.tree(commit.tree)
git_old_tree = tree.id
git_active_tree = tree
def import_bill(data, standalone_votes, categorizer):
"""
insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization)
"""
abbr = data[settings.LEVEL_FIELD]
# clean up bill_ids
data['bill_id'] = fix_bill_id(data['bill_id'])
if 'alternate_bill_ids' in data:
data['alternate_bill_ids'] = [fix_bill_id(bid) for bid in
data['alternate_bill_ids']]
# move subjects to scraped_subjects
# NOTE: intentionally doesn't copy blank lists of subjects
# this avoids the problem where a bill is re-run but we can't
# get subjects anymore (quite common)
subjects = data.pop('subjects', None)
if subjects:
data['scraped_subjects'] = subjects
# update categorized subjects
if categorizer:
categorizer.categorize_bill(data)
# companions
for companion in data['companions']:
companion['bill_id'] = fix_bill_id(companion['bill_id'])
# query based on companion
spec = companion.copy()
spec[settings.LEVEL_FIELD] = abbr
if not spec['chamber']:
spec.pop('chamber')
companion_obj = db.bills.find_one(spec)
if companion_obj:
companion['internal_id'] = companion_obj['_id']
else:
logger.warning('Unknown companion: {chamber} {session} {bill_id}'
.format(**companion))
# look for a prior version of this bill
bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
'session': data['session'],
'chamber': data['chamber'],
'bill_id': data['bill_id']})
# keep doc ids consistent
doc_matcher = DocumentMatcher(abbr)
if bill:
doc_matcher.learn_ids(bill['versions'] + bill['documents'])
doc_matcher.set_ids(data['versions'] + data['documents'])
# match sponsor leg_ids
match_sponsor_ids(abbr, data)
# process votes ############
# pull votes off bill
bill_votes = data.pop('votes', [])
# grab the external bill votes if present
if metadata(abbr).get('_partial_vote_bill_id'):
# this is a hack initially added for Rhode Island, where we can't
# determine the full bill_id. If this key is in the metadata we just
# use the numeric portion; not ideal, since it won't work where
# HB/SB numbers overlap, but in RI they never do
# pull off numeric portion of bill_id
numeric_bill_id = data['bill_id'].split()[1]
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
numeric_bill_id), [])
else:
# add loaded votes to data
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
data['bill_id']), [])
# do id matching and other vote prep
if bill:
prepare_votes(abbr, data['session'], bill['_id'], bill_votes)
else:
prepare_votes(abbr, data['session'], None, bill_votes)
# process actions ###########
dates = {'first': None, 'last': None, 'passed_upper': None,
'passed_lower': None, 'signed': None}
vote_flags = {
"bill:passed",
"bill:failed",
"bill:veto_override:passed",
"bill:veto_override:failed",
"amendment:passed",
"amendment:failed",
"committee:passed",
"committee:passed:favorable",
"committee:passed:unfavorable",
"committee:passed:failed"
}
already_linked = set()
remove_vote = set()
for action in data['actions']:
adate = action['date']
def _match_committee(name):
return get_committee_id(abbr, action['actor'], name)
def _match_legislator(name):
return get_legislator_id(abbr,
data['session'],
action['actor'],
name)
resolvers = {
"committee": _match_committee,
"legislator": _match_legislator
}
if "related_entities" in action:
for entity in action['related_entities']:
try:
resolver = resolvers[entity['type']]
except KeyError as e:
# We don't know how to deal.
logger.error("I don't know how to sort a %s" % e)
continue
id = resolver(entity['name'])
entity['id'] = id
# first & last dates
if not dates['first'] or adate < dates['first']:
dates['first'] = adate
if not dates['last'] or adate > dates['last']:
dates['last'] = adate
# passed & signed dates
if (not dates['passed_upper'] and action['actor'] == 'upper'
and 'bill:passed' in action['type']):
dates['passed_upper'] = adate
elif (not dates['passed_lower'] and action['actor'] == 'lower'
and 'bill:passed' in action['type']):
dates['passed_lower'] = adate
elif (not dates['signed'] and 'governor:signed' in action['type']):
dates['signed'] = adate
# vote-action matching
action_attached = False
# only attempt vote matching if action has a date and is one of the
# designated vote action types
if set(action['type']).intersection(vote_flags) and action['date']:
for vote in bill_votes:
if not vote['date']:
continue
delta = abs(vote['date'] - action['date'])
if (delta < datetime.timedelta(hours=20) and
vote['chamber'] == action['actor']):
if action_attached:
# multiple votes match, we can't guess
action.pop('related_votes', None)
else:
related_vote = vote['vote_id']
if related_vote in already_linked:
remove_vote.add(related_vote)
already_linked.add(related_vote)
action['related_votes'] = [related_vote]
action_attached = True
# remove related_votes that we linked to multiple actions
for action in data['actions']:
for vote in remove_vote:
if vote in action.get('related_votes', []):
action['related_votes'].remove(vote)
# save action dates to data
data['action_dates'] = dates
data['_term'] = term_for_session(abbr, data['session'])
alt_titles = set(data.get('alternate_titles', []))
for version in data['versions']:
# Merge any version titles into the alternate_titles list
if 'title' in version:
alt_titles.add(version['title'])
if '+short_title' in version:
alt_titles.add(version['+short_title'])
try:
# Make sure the primary title isn't included in the
# alternate title list
alt_titles.remove(data['title'])
except KeyError:
pass
data['alternate_titles'] = list(alt_titles)
data = apply_filters(filters, data)
if not bill:
insert_with_id(data)
git_add_bill(data)
save_votes(data, bill_votes)
return "insert"
else:
update(bill, data, db.bills)
git_add_bill(bill)
save_votes(bill, bill_votes)
return "update"
def import_bills(abbr, data_dir):
data_dir = os.path.join(data_dir, abbr)
pattern = os.path.join(data_dir, 'bills', '*.json')
git_prelod(abbr)
counts = {
"update": 0,
"insert": 0,
"total": 0
}
votes = load_standalone_votes(data_dir)
try:
categorizer = SubjectCategorizer(abbr)
except Exception as e:
logger.debug('Proceeding without subject categorizer: %s' % e)
categorizer = None
paths = glob.glob(pattern)
for path in paths:
with open(path) as f:
data = prepare_obj(json.load(f))
counts["total"] += 1
ret = import_bill(data, votes, categorizer)
counts[ret] += 1
logger.info('imported %s bill files' % len(paths))
for remaining in votes.keys():
logger.debug('Failed to match vote %s %s %s' % tuple([
r.encode('ascii', 'replace') for r in remaining]))
populate_current_fields(abbr)
git_commit("Import Update")
return counts
def populate_current_fields(abbr):
"""
Set/update _current_term and _current_session fields on all bills
for a given location.
"""
meta = db.metadata.find_one({'_id': abbr})
current_term = meta['terms'][-1]
current_session = current_term['sessions'][-1]
for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
if bill['session'] == current_session:
bill['_current_session'] = True
else:
bill['_current_session'] = False
if bill['session'] in current_term['sessions']:
bill['_current_term'] = True
else:
bill['_current_term'] = False
db.bills.save(bill, safe=True)
def prepare_votes(abbr, session, bill_id, scraped_votes):
# if bill already exists, try and preserve vote_ids
vote_matcher = VoteMatcher(abbr)
if bill_id:
existing_votes = list(db.votes.find({'bill_id': bill_id}))
if existing_votes:
vote_matcher.learn_ids(existing_votes)
vote_matcher.set_ids(scraped_votes)
# link votes to committees and legislators
for vote in scraped_votes:
# committee_ids
if 'committee' in vote:
committee_id = get_committee_id(abbr, vote['chamber'],
vote['committee'])
vote['committee_id'] = committee_id
# vote leg_ids
vote['_voters'] = []
for vtype in ('yes_votes', 'no_votes', 'other_votes'):
svlist = []
for svote in vote[vtype]:
id = get_legislator_id(abbr, session, vote['chamber'], svote)
svlist.append({'name': svote, 'leg_id': id})
vote['_voters'].append(id)
vote[vtype] = svlist
def save_votes(bill, votes):
# doesn't delete votes if none were scraped this time
if not votes:
return
# remove all existing votes for this bill
db.votes.remove({'bill_id': bill['_id']}, safe=True)
# save the votes
for vote in votes:
vote['_id'] = vote['vote_id']
vote['bill_id'] = bill['_id']
vote[settings.LEVEL_FIELD] = bill[settings.LEVEL_FIELD]
vote['session'] = bill['session']
db.votes.save(vote, safe=True)
class GenericIDMatcher(object):
def __init__(self, abbr):
self.abbr = abbr
self.ids = {}
def _reset_sequence(self):
self.seq_for_key = defaultdict(int)
def _get_next_id(self):
return next_big_id(self.abbr, self.id_letter, self.id_collection)
def nondup_key_for_item(self, item):
# call user's key_for_item
key = self.key_for_item(item)
# running count of how many of this key we've seen
seq_num = self.seq_for_key[key]
self.seq_for_key[key] += 1
# append seq_num to key to avoid sharing key for multiple items
return key + (seq_num,)
def learn_ids(self, item_list):
""" read in already set ids on objects """
self._reset_sequence()
for item in item_list:
key = self.nondup_key_for_item(item)
self.ids[key] = item[self.id_key]
def set_ids(self, item_list):
""" set ids on an object, using internal mapping then new ids """
self._reset_sequence()
for item in item_list:
key = self.nondup_key_for_item(item)
item[self.id_key] = self.ids.get(key) or self._get_next_id()
class VoteMatcher(GenericIDMatcher):
id_letter = 'V'
id_collection = 'vote_ids'
id_key = 'vote_id'
def key_for_item(self, vote):
return (vote['motion'], vote['chamber'], vote['date'],
vote['yes_count'], vote['no_count'], vote['other_count'])
class DocumentMatcher(GenericIDMatcher):
id_letter = 'D'
id_collection = 'document_ids'
id_key = 'doc_id'
def key_for_item(self, document):
# URL is good enough as a key
return (document['url'],)
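# --- Editor's illustration (hedged) --------------------------------------------
# The matcher classes above exist to keep ids stable across re-imports:
# learn_ids() records key -> id for items already in the database, and
# set_ids() reuses those ids for freshly scraped items with the same key,
# minting a new id only when the key is unseen.  (The real classes also append
# a per-key sequence number so duplicate keys within one list stay distinct.)
# A simplified, database-free sketch of the same idea, with illustrative names:
class _InMemoryIDMatcherSketch(object):
    def __init__(self, prefix='X'):
        self.ids = {}
        self.counter = 0
        self.prefix = prefix
    def learn_ids(self, items, key_fields, id_key):
        for item in items:
            self.ids[tuple(item[f] for f in key_fields)] = item[id_key]
    def set_ids(self, items, key_fields, id_key):
        for item in items:
            key = tuple(item[f] for f in key_fields)
            if key not in self.ids:
                self.counter += 1
                self.ids[key] = '%s%06d' % (self.prefix, self.counter)
            item[id_key] = self.ids[key]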
|
|
# -*- coding: utf-8 -*-
import argparse
import os
import pydoc
import re
import subprocess
import sys
import uuid
import webbrowser
from builtins import input
from django.utils.termcolors import colorize
import requests
from six import string_types
def r(msg):
return colorize(msg, fg="red")
def g(msg):
return colorize(msg, fg="green")
def y(msg):
return colorize(msg, fg="yellow")
def b(msg):
return colorize(msg, opts=('bold',))
def o(cmd, msg=None, shell=False, timeout=None):
if shell:
assert isinstance(cmd, string_types), "with shell, cmd must be string"
else:
if isinstance(cmd, string_types):
cmd = cmd.split()
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell,
cwd=os.getcwd(), stdin=subprocess.DEVNULL,
)
try:
proc.wait(timeout=timeout)
except subprocess.TimeoutExpired:
print("timeout, cmd=%s" % (cmd))
if msg and (proc.returncode or proc.returncode is None):
print(r(msg))
if proc.returncode is None:
print("setting returncode to 128")
proc.returncode = 128
return proc.stdout.read().decode("utf-8"), proc.returncode
def must(cmd, msg=None, shell=False, timeout=None):
op, code = o(cmd, shell=shell, timeout=timeout)
if code:
if msg:
print(r(msg))
print(op)
sys.exit(code)
raise Exception(
"Command failed with non zero exit code [%s:%s]\n%s" % (cmd, code, op)
)
return op
def gh_token():
return open(os.path.expanduser("~/.github")).read().strip().split(":")[1]
def gh_login():
return open(os.path.expanduser("~/.github")).read().strip().split(":")[0]
def gh_post_json(url, data):
url = "https://api.github.com/repos/amitu/worddb/" + url
return requests.post(url, params={"access_token": gh_token()}, json=data)
def gh_api(url, params=None):
if params is None:
params = {}
params["access_token"] = gh_token()
url = "https://api.github.com/repos/amitu/worddb/" + url
return requests.get(url, params=params)
def gh_update_pr(id, **data):
r = requests.patch(
"https://api.github.com/repos/amitu/worddb/issues/%d" % id,
params={
"access_token": gh_token(),
},
json=data,
)
if r.status_code != 200:
print(r.status_code)
print(r.text)
return r
def has_local_changes():
return bool(o("git diff --quiet")[1])
def has_unpushed_changes():
branch = current_branch()
return bool(must("git log origin/%s..%s" % (branch, branch)).strip())
def random_branch_name():
return str(uuid.uuid4()).replace("-", "")
def current_branch():
return must(
"git rev-parse --symbolic-full-name --abbrev-ref HEAD", "Not a git repo?"
).strip()
def wrap(msg):
return "\n".join(
'\n'.join(line.strip() for line in re.findall(r'.{1,70}(?:\s+|$)', x))
for x in msg.splitlines()
)
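# Editor's note (hedged): the regex above grabs non-overlapping runs of up to
# 70 characters that end at whitespace (or at the end of the line), so text
# that contains spaces is re-broken into lines of at most roughly 70
# characters, while line breaks already present in msg are preserved by the
# outer splitlines() loop.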
def author_title(pr):
author, title = pr["title"].split("] ", 1)
return author[1:], title
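# Editor's note (hedged): PR titles in this workflow are assumed to look like
# "[alice] Fix the widget", so author_title() returns ("alice", "Fix the
# widget"); handle_start() below builds titles in exactly that form.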
def handle_list(args):
print(y("Getting PR data."))
prs = gh_api("pulls").json()
branch = current_branch()
current = None
you = gh_login()
if branch != "master":
for pr in prs:
if pr["head"]["ref"] == current_branch():
author, title = author_title(pr)
if author != gh_login():
title = "[%s] %s" % (r(author), title)
if pr["assignee"]:
print(
"%s [%s -> %s]: %s" % (
b("Current"),
g("#%s" % pr["number"]),
pr["assignee"]["login"],
title,
)
)
else:
print(
"%s [%s]: %s" % (
b("Current"), g("#%s" % pr["number"]), title
)
)
current = pr
break
else:
print("No pull request for %s." % r(branch))
yours = []
review_pending = []
rest = []
for pr in prs:
author, title = author_title(pr)
if pr == current:
continue
if author == you:
yours.append(pr)
continue
if pr["assignee"] and pr["assignee"]["login"] == you:
review_pending.append(pr)
continue
rest.append(pr)
if yours:
print()
print(b("Yours:"))
for pr in reversed(yours):
_, title = author_title(pr)
if pr["assignee"]:
print(
"%s [%s]: %s" % (
g("#%s" % pr["number"]),
pr["assignee"]["login"],
title,
)
)
else:
print("%s: %s" % (g("#%s" % pr["number"]), title))
if review_pending:
print()
print(b("Your review needed:"))
for pr in reversed(review_pending):
author, title = author_title(pr)
print(
"%s [%s]: %s" % (
g("#%s" % pr["number"]),
r(author),
title,
)
)
if rest:
print()
print(b("Others:"))
for pr in reversed(rest):
author, title = author_title(pr)
if pr["assignee"]:
print(
"%s [%s -> %s]: %s" % (
g("#%s" % pr["number"]),
r(author),
pr["assignee"]["login"],
title,
)
)
else:
print(
"%s [%s]: %s" % (g("#%s" % pr["number"]), r(author), title)
)
def handle_show(args):
print(y("Fetching from origin."))
must("git fetch origin --prune")
print(y("Getting PR data."))
pr = gh_api("pulls/%d" % args.id).json()
message = "Author: %s\n" % pr["user"]["login"]
message += "Title: %s\n\n" % g(pr["title"])
message += wrap(pr["body"] or r("No body."))
diff, _ = o(
"git --no-pager diff origin/master...origin/%s --color" % (
pr["head"]["ref"]
), timeout=2,
)
message += "\n\n\n%s" % diff
pydoc.pager(message)
return pr, message
def handle_approve(args):
if has_local_changes():
print(r("Can not perform this operation with local changes."))
return
pr, message = handle_show(args)
current = current_branch()
branch = pr["head"]["ref"]
_, title = author_title(pr)
description = pr["body"] or ""
description += (
"\n\nPR: https://github.com/amitu/worddb/pull/%s" % args.id
)
description = wrap(description)
while True:
try:
resp = input(
"Do you want to merge? [%sHOW AGAIN/%spen/%ses/%so]: " % (
g("S"), g("o"), g("y"), g("n")
)
).lower()
if resp == "open" or resp == "o":
handle_open(args)
return
if resp == "yes" or resp == "y":
break
if resp == "" or resp == "show again" or resp == "s":
pydoc.pager(message)
continue
return
except KeyboardInterrupt:
return
must("git checkout %s" % branch)
print(y("Pulling origin."))
must("git pull origin %s" % branch)
must("git rebase origin/master")
must(["git", "commit", "--amend", "-m", "%s\n\n%s" % (title, description)])
print(y("Pushing origin"))
must("git push origin %s --force" % branch)
must("git checkout master")
must("git merge origin/master")
must("git rebase origin/%s" % branch)
must("git branch -d %s" % branch)
must("git push origin master :%s" % branch)
if current != branch:
must("git checkout %s" % current)
print("Approved %s." % g("#%s: %s" % (args.id, title)))
def handle_comment(args):
gh_post_json(
"issues/%d/comments" % args.id, {"body": args.comment}
).json()
print(g("Posted comment."))
def handle_open(args):
webbrowser.open("https://github.com/amitu/worddb/pull/%d" % args.id)
def handle_switch(args):
if has_local_changes():
print(r("Has local changes, can't switch."))
return
print(y("Fetching upstream changes."))
o("git fetch origin --prune", "Could not fetch, check internet/permissions.")
print(y("Getting PR info from Github."))
pr = gh_api("pulls/%d" % args.id).json()
if pr["state"] != "open":
print(r("Can not switch to non open PR [state=%s]." % pr["state"]))
return
if current_branch() == pr["head"]["ref"]:
print(r("You are already on #%s." % args.id))
return
must("git checkout %s" % pr["head"]["ref"])
print("Switched to: %s" % g("[#%d] %s" % (pr["number"], pr["title"])))
def last_commit_author():
return must("git log -1").split("\n")[1]
def handle_commit(args):
print(y("Fetching upstream changes."))
o("git fetch origin --prune", "Could not fetch, check internet/permissions.")
if not has_local_changes():
print("No local changes.")
must("git rebase origin/master")
print(y("Pushing upstream."))
must("git push -f", "Could not push, push manually.")
return
if args.preserve_author:
must("git commit --amend -am wip")
must("git rebase origin/master") # can conflict, resolve it,
print(y("Pushing upstream."))
must("git push -f")
print(g("Add done."))
return
oauthor = last_commit_author()
must("git commit --amend --reset-author -am wip")
must("git rebase origin/master") # can conflict, resolve it,
print(y("Pushing upstream."))
must("git push -f")
if oauthor == last_commit_author():
return
print(r("Committer changed, updating pull request."))
# to get PR number and current title for this branch. since we do not
# know the PR number either, we have to fetch full PR list, and match
print(y("Fetching PRs"))
branch = current_branch()
for pr in gh_api("pulls").json():
if pr["head"]["ref"] != branch:
continue
break
else:
print(r("Failed. No PR open for this branch [%s]!" % branch))
return
_, title = author_title(pr)
title = "[%s] %s" % (gh_login(), title)
if title != pr["title"]:
print(y("Updating PR"))
gh_update_pr(pr["number"], title=title)
print(g("Add done."))
def handle_assign(args):
pr = gh_update_pr(args.id, assignee=args.who).json()
print(
"Assigned %s to %s." % (
g("[#%s] %s" % (pr["number"], pr["title"])),
r(pr["assignee"]["login"]),
)
)
def handle_mm(args):
if has_local_changes():
print(r("Can not merge with local changes."))
return
print(y("Fetching latest changes."))
must("git fetch origin --prune")
must("git rebase origin/master")
print(y("Pushing changes to origin."))
must("git push -f")
print(g("Merged."))
def handle_unassign(args):
print(y("Unassigning."))
pr = gh_update_pr(args.id, assignee='').json()
print("Unassigned %s." % g("[#%s] %s" % (pr["number"], pr["title"])))
def handle_start(args):
if current_branch() != "master":
print(r("Start from master branch only."))
return
print(y("Fetching lastest master."))
o("git fetch origin --prune", "Could not fetch, check internet/permissions.")
branch = random_branch_name()
print(y("Creating branch: %s." % branch))
must("git checkout -b %s" % branch)
print(y("Pushing branch to GitHub."))
must("git commit -am wip --allow-empty")
must("git push --set-upstream origin %s" % branch)
print("Creating pull request.")
if not args.who:
args.who = gh_login()
title = "[%s] %s" % (args.who, args.title)
url = must(["hub", "pull-request", "-m", title]).strip()
print(g(url))
def handle_diff(args):
d1 = must("git diff --color")
d2 = must("git diff --cached --color")
d3 = must("git diff origin/master.. --color", shell=True)
msg = g("Unstaged changes:") + "\n\n" + d1 + "\n\n"
msg += g("Staged changes:") + "\n\n" + d2 + "\n\n"
msg += g("Committed changes:") + "\n\n" + d3
pydoc.pager(msg)
def forbidden_on_master(func):
return func in [handle_assign, handle_commit, handle_diff, handle_mm]
def main():
parser = argparse.ArgumentParser(
prog='gg', description="Git wrapper to work using acko workflow."
)
subparsers = parser.add_subparsers()
parser_list = subparsers.add_parser('list', help='list open tasks')
parser_list.set_defaults(func=handle_list)
parser_show = subparsers.add_parser('show', help='show details of a task')
parser_show.add_argument("id", help="task id", type=int)
parser_show.set_defaults(func=handle_show)
parser_approve = subparsers.add_parser('approve', help='approve a task')
parser_approve.add_argument("id", help="task id", type=int)
parser_approve.set_defaults(func=handle_approve)
parser_comment = subparsers.add_parser('comment', help='comment on a task')
parser_comment.add_argument("id", help="task id", type=int)
parser_comment.add_argument("comment", help="comment", type=str)
parser_comment.set_defaults(func=handle_comment)
parser_open = subparsers.add_parser('open', help='browse task on github')
parser_open.add_argument("id", help="task id", type=int)
parser_open.set_defaults(func=handle_open)
parser_switch = subparsers.add_parser('switch', help='switch task')
parser_switch.add_argument("id", help="task id", type=int)
parser_switch.set_defaults(func=handle_switch)
parser_commit = subparsers.add_parser('commit', help='commit current task')
parser_commit.add_argument(
"-p", "--preserve-author",
help="Preserve authorship",
action="store_true",
)
parser_commit.set_defaults(func=handle_commit)
parser_assign = subparsers.add_parser(
'assign', help='assign a task for review'
)
parser_assign.add_argument("id", help="task id", type=int)
parser_assign.add_argument("who", help="who to assign to", type=str)
parser_assign.set_defaults(func=handle_assign)
parser_unassign = subparsers.add_parser(
'unassign', help='unassign a task for review'
)
parser_unassign.add_argument("id", help="task id", type=int)
parser_unassign.set_defaults(func=handle_unassign)
parser_start = subparsers.add_parser('start', help='create a new task')
parser_start.add_argument("title", help="title of the task", type=str)
parser_start.add_argument("--who", help="assign this task to", type=str)
parser_start.set_defaults(func=handle_start)
parser_diff = subparsers.add_parser('diff', help='show diff of current task')
parser_diff.set_defaults(func=handle_diff)
parser_mm = subparsers.add_parser(
'mm', help='rebase on latest master from origin'
)
parser_mm.set_defaults(func=handle_mm)
args = parser.parse_args()
if forbidden_on_master(args.func) and current_branch() == "master":
print(r("Can not do this operation on master."))
args.func(args)
"""
Which color library to use? Options are:
1. from django.utils.termcolors import colorize
2. https://pypi.python.org/pypi/termcolor
3. https://pypi.python.org/pypi/colorama
4. https://pypi.python.org/pypi/blessings/
5. https://github.com/kennethreitz/clint
6. https://pypi.python.org/pypi/colorprint/0.1
7. https://pypi.python.org/pypi/colorconsole
8. https://github.com/Robpol86/colorclass
We are going with django, as django is already our dependency.
"""
if __name__ == '__main__':
main()
|
|
# Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
y = 1
while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(4, 'line'),
(4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception, exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError, exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generated for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(6, 'line'),
(6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
def generator_function():
try:
yield True
"continued"
finally:
"finally"
def generator_example():
# any() will leave the generator before its end
x = any(generator_function())
# the following lines were not traced
for x in range(10):
y = x
generator_example.events = ([(0, 'call'),
(2, 'line'),
(-6, 'call'),
(-5, 'line'),
(-4, 'line'),
(-4, 'return'),
(-4, 'call'),
(-4, 'exception'),
(-1, 'line'),
(-1, 'return')] +
[(5, 'line'), (6, 'line')] * 10 +
[(5, 'line'), (5, 'return')])
class Tracer:
def __init__(self):
self.events = []
def trace(self, frame, event, arg):
self.events.append((frame.f_lineno, event))
return self.trace
def traceWithGenexp(self, frame, event, arg):
(o for o in [1])
self.events.append((frame.f_lineno, event))
return self.trace
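# Editor's note (hedged): Tracer.trace records a (line number, event) pair for
# every callback; both callbacks return self.trace so subsequent events in the
# frame keep being handled by trace.  traceWithGenexp additionally creates
# (without consuming) a throwaway generator expression on each callback, which
# is what test_13_genexp below relies on to reproduce issue1265.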
class TraceTestCase(unittest.TestCase):
# Disable gc collection when tracing, otherwise the
# deallocators may be traced as well.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
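    # Note on the expected event lists defined above: line numbers are given as
    # offsets from each traced function's co_firstlineno (0 is the 'def' line),
    # which compare_events subtracts out, so helper functions defined a few
    # lines earlier show up with negative offsets such as (-3, 'call').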
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def set_and_retrieve_none(self):
sys.settrace(None)
assert sys.gettrace() is None
def set_and_retrieve_func(self):
def fn(*args):
pass
sys.settrace(fn)
try:
assert sys.gettrace() is fn
finally:
sys.settrace(None)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
def test_13_genexp(self):
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
# and if the traced function contains another generator
# that is not completely exhausted, the trace stopped.
# Worse: the 'finally' clause was not invoked.
tracer = Tracer()
sys.settrace(tracer.traceWithGenexp)
generator_example()
sys.settrace(None)
self.compare_events(generator_example.__code__.co_firstlineno,
tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
exec("def f():\n" + "\n" * 256 + " pass")
self.run_and_compare(
f,
[(0, 'call'),
(257, 'line'),
(257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
def trace(self, frame, event, arg):
"""A trace function that raises an exception in response to a
specific trace event."""
if event == self.raiseOnEvent:
raise ValueError # just something that isn't RuntimeError
else:
return self.trace
def f(self):
"""The function to trace; raises an exception if that's the case
we're testing, so that the 'exception' trace event fires."""
if self.raiseOnEvent == 'exception':
x = 0
y = 1/x
else:
return 1
def run_test_for_event(self, event):
"""Tests that an exception raised in response to the given event is
handled OK."""
self.raiseOnEvent = event
try:
for i in xrange(sys.getrecursionlimit() + 1):
sys.settrace(self.trace)
try:
self.f()
except ValueError:
pass
else:
self.fail("exception not thrown!")
except RuntimeError:
self.fail("recursion counter not reset")
# Test the handling of exceptions raised by each kind of trace event.
def test_call(self):
self.run_test_for_event('call')
def test_line(self):
self.run_test_for_event('line')
def test_return(self):
self.run_test_for_event('return')
def test_exception(self):
self.run_test_for_event('exception')
def test_trash_stack(self):
def f():
for i in range(5):
print i # line tracing will raise an exception at this line
def g(frame, why, extra):
if (why == 'line' and
frame.f_lineno == f.func_code.co_firstlineno + 2):
raise RuntimeError, "i am crashing"
return g
sys.settrace(g)
try:
f()
except RuntimeError:
# the test is really that this doesn't segfault:
import gc
gc.collect()
else:
self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
"""Defines a trace function that jumps from one place to another,
with the source and destination lines of the jump being defined by
the 'jump' property of the function under test."""
def __init__(self, function):
self.function = function
self.jumpFrom = function.jump[0]
self.jumpTo = function.jump[1]
self.done = False
def trace(self, frame, event, arg):
if not self.done and frame.f_code == self.function.func_code:
firstLine = frame.f_code.co_firstlineno
if frame.f_lineno == firstLine + self.jumpFrom:
# Cope with non-integer self.jumpTo (because of
# no_jump_to_non_integers below).
try:
frame.f_lineno = firstLine + self.jumpTo
except TypeError:
frame.f_lineno = self.jumpTo
self.done = True
return self.trace
# The first set of 'jump' tests are for things that are allowed:
def jump_simple_forwards(output):
output.append(1)
output.append(2)
output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
output.append(1)
output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
for j in [3]: # Also tests jumping over a block
output.append(4)
output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
output.append(1)
for i in [1]:
output.append(3)
for j in [2]: # Also tests jumping over a block
output.append(5)
output.append(6)
output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
output.append(1)
# Jumping to this line should skip to the next one.
output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
output.append(1)
output.append(2)
output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
try:
output.append(2)
finally:
output.append(4)
try:
output.append(6)
finally:
output.append(8)
output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
def no_jump_too_far_forwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
try:
output.append(2)
except:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
try:
output.append(2)
except ValueError:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
try:
output.append(2)
except ValueError, e:
output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
try:
output.append(2)
except (ValueError, RuntimeError), e:
output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
try:
output.append(2)
for i in 1, 2:
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
try:
for i in 1, 2:
output.append(3)
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
try:
try:
output.append(3)
x = 1
finally:
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
try:
try:
output.append(3)
finally:
output.append(5)
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
try:
output.append(2)
except ValueError, e:
output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
try:
previous_frame = sys._getframe().f_back
previous_frame.f_lineno = previous_frame.f_lineno
except ValueError, e:
# This is the exception we wanted; make sure the error message
# talks about trace functions.
if 'trace' not in str(e):
raise
else:
# Something's wrong - the expected exception wasn't raised.
raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
def compare_jump_output(self, expected, received):
if received != expected:
self.fail( "Outputs don't match:\n" +
"Expected: " + repr(expected) + "\n" +
"Received: " + repr(received))
def run_test(self, func):
tracer = JumpTracer(func)
sys.settrace(tracer.trace)
output = []
func(output)
sys.settrace(None)
self.compare_jump_output(func.output, output)
def test_01_jump_simple_forwards(self):
self.run_test(jump_simple_forwards)
def test_02_jump_simple_backwards(self):
self.run_test(jump_simple_backwards)
def test_03_jump_out_of_block_forwards(self):
self.run_test(jump_out_of_block_forwards)
def test_04_jump_out_of_block_backwards(self):
self.run_test(jump_out_of_block_backwards)
def test_05_jump_to_codeless_line(self):
self.run_test(jump_to_codeless_line)
def test_06_jump_to_same_line(self):
self.run_test(jump_to_same_line)
def test_07_jump_in_nested_finally(self):
self.run_test(jump_in_nested_finally)
def test_08_no_jump_too_far_forwards(self):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
def test_10_no_jump_to_except_1(self):
self.run_test(no_jump_to_except_1)
def test_11_no_jump_to_except_2(self):
self.run_test(no_jump_to_except_2)
def test_12_no_jump_to_except_3(self):
self.run_test(no_jump_to_except_3)
def test_13_no_jump_to_except_4(self):
self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
self.run_test(no_jump_backwards_into_block)
def test_16_no_jump_into_finally_block(self):
self.run_test(no_jump_into_finally_block)
def test_17_no_jump_out_of_finally_block(self):
self.run_test(no_jump_out_of_finally_block)
def test_18_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
no_jump_without_trace_function()
def test_main():
test_support.run_unittest(
TraceTestCase,
RaisingTraceFuncTestCase,
JumpTestCase
)
if __name__ == "__main__":
test_main()
|
|
from flask import abort, current_app
from notifications_utils.serialised_model import SerialisedModelCollection
from werkzeug.utils import cached_property
from app.models import JSONModel, SortByNameMixin
from app.models.contact_list import ContactLists
from app.models.job import (
ImmediateJobs,
PaginatedJobs,
PaginatedUploads,
ScheduledJobs,
)
from app.models.organisation import Organisation
from app.models.user import InvitedUsers, User, Users
from app.notify_client.api_key_api_client import api_key_api_client
from app.notify_client.billing_api_client import billing_api_client
from app.notify_client.email_branding_client import email_branding_client
from app.notify_client.inbound_number_client import inbound_number_client
from app.notify_client.invite_api_client import invite_api_client
from app.notify_client.job_api_client import job_api_client
from app.notify_client.letter_branding_client import letter_branding_client
from app.notify_client.organisations_api_client import organisations_client
from app.notify_client.service_api_client import service_api_client
from app.notify_client.template_folder_api_client import (
template_folder_api_client,
)
from app.utils import get_default_sms_sender
class Service(JSONModel, SortByNameMixin):
ALLOWED_PROPERTIES = {
'active',
'allowed_broadcast_provider',
'billing_contact_email_addresses',
'billing_contact_names',
'billing_reference',
'broadcast_channel',
'consent_to_research',
'contact_link',
'count_as_live',
'email_from',
'go_live_at',
'go_live_user',
'id',
'inbound_api',
'message_limit',
'rate_limit',
'name',
'notes',
'prefix_sms',
'purchase_order_number',
'research_mode',
'service_callback_api',
'volume_email',
'volume_sms',
'volume_letter',
}
TEMPLATE_TYPES = (
'email',
'sms',
'letter',
'broadcast',
)
ALL_PERMISSIONS = TEMPLATE_TYPES + (
'edit_folder_permissions',
'email_auth',
'inbound_sms',
'international_letters',
'international_sms',
'upload_document',
'broadcast',
)
@classmethod
def from_id(cls, service_id):
return cls(service_api_client.get_service(service_id)['data'])
@property
def permissions(self):
return self._dict.get('permissions', self.TEMPLATE_TYPES)
@property
def billing_details(self):
billing_details = [
self.billing_contact_email_addresses,
self.billing_contact_names,
self.billing_reference,
self.purchase_order_number
]
if any(billing_details):
return billing_details
else:
return None
def update(self, **kwargs):
return service_api_client.update_service(self.id, **kwargs)
def update_count_as_live(self, count_as_live):
return service_api_client.update_count_as_live(self.id, count_as_live=count_as_live)
def update_status(self, live):
return service_api_client.update_status(self.id, live=live)
def switch_permission(self, permission):
return self.force_permission(
permission,
on=not self.has_permission(permission),
)
def force_permission(self, permission, on=False):
permissions, permission = set(self.permissions), {permission}
return self.update_permissions(
permissions | permission if on else permissions - permission,
)
def update_permissions(self, permissions):
return self.update(permissions=list(permissions))
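    # Illustrative sketch (not part of the original model): switch_permission
    # and force_permission work by set arithmetic on the current permissions
    # and persist the result through the API client, e.g.
    #
    #     service.force_permission('inbound_sms', on=True)
    #     # -> update(permissions=[...existing permissions..., 'inbound_sms'])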
def toggle_research_mode(self):
self.update(research_mode=not self.research_mode)
@property
def trial_mode(self):
return self._dict['restricted']
@property
def live(self):
return not self.trial_mode
def has_permission(self, permission):
if permission not in self.ALL_PERMISSIONS:
raise KeyError(f'{permission} is not a service permission')
return permission in self.permissions
def get_page_of_jobs(self, page):
return PaginatedJobs(self.id, page=page)
def get_page_of_uploads(self, page):
return PaginatedUploads(self.id, page=page)
@cached_property
def has_jobs(self):
return job_api_client.has_jobs(self.id)
@cached_property
def immediate_jobs(self):
if not self.has_jobs:
return []
return ImmediateJobs(self.id)
@cached_property
def scheduled_jobs(self):
if not self.has_jobs:
return []
return ScheduledJobs(self.id)
@cached_property
def scheduled_job_stats(self):
if not self.has_jobs:
return {'count': 0}
return job_api_client.get_scheduled_job_stats(self.id)
@cached_property
def invited_users(self):
return InvitedUsers(self.id)
def invite_pending_for(self, email_address):
return email_address.lower() in (
invited_user.email_address.lower()
for invited_user in self.invited_users
)
@cached_property
def active_users(self):
return Users(self.id)
@cached_property
def team_members(self):
return sorted(
self.invited_users + self.active_users,
key=lambda user: user.email_address.lower(),
)
@cached_property
def has_team_members(self):
return len([
user for user in self.team_members
if user.has_permission_for_service(self.id, 'manage_service')
]) > 1
def cancel_invite(self, invited_user_id):
if str(invited_user_id) not in {user.id for user in self.invited_users}:
abort(404)
return invite_api_client.cancel_invited_user(
service_id=self.id,
invited_user_id=str(invited_user_id),
)
def get_team_member(self, user_id):
if str(user_id) not in {user.id for user in self.active_users}:
abort(404)
return User.from_id(user_id)
@cached_property
def all_templates(self):
templates = service_api_client.get_service_templates(self.id)['data']
return [
template for template in templates
if template['template_type'] in self.available_template_types
]
@cached_property
def all_template_ids(self):
return {template['id'] for template in self.all_templates}
def get_templates(self, template_type='all', template_folder_id=None, user=None):
if user and template_folder_id:
folder = self.get_template_folder(template_folder_id)
if not user.has_template_folder_permission(folder):
return []
if isinstance(template_type, str):
template_type = [template_type]
if template_folder_id:
template_folder_id = str(template_folder_id)
return [
template for template in self.all_templates
if (set(template_type) & {'all', template['template_type']})
and template.get('folder') == template_folder_id
]
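    # Illustrative examples (hypothetical service instance):
    #
    #     service.get_templates()         # every template in the root folder
    #     service.get_templates('email')  # email templates in the root folder
    #     service.get_templates(['sms', 'letter'], template_folder_id=folder_id)
    #
    # A template only matches when its 'folder' value equals the (stringified)
    # template_folder_id, so the default of None returns root-level templates.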
def get_template(self, template_id, version=None):
return service_api_client.get_service_template(self.id, template_id, version)['data']
def get_template_folder_with_user_permission_or_403(self, folder_id, user):
template_folder = self.get_template_folder(folder_id)
if not user.has_template_folder_permission(template_folder):
abort(403)
return template_folder
def get_template_with_user_permission_or_403(self, template_id, user):
template = self.get_template(template_id)
self.get_template_folder_with_user_permission_or_403(template['folder'], user)
return template
@property
def available_template_types(self):
return list(filter(self.has_permission, self.TEMPLATE_TYPES))
@property
def has_templates(self):
return bool(self.all_templates)
def has_folders(self):
return bool(self.all_template_folders)
@property
def has_multiple_template_types(self):
return len({
template['template_type'] for template in self.all_templates
}) > 1
@property
def has_estimated_usage(self):
return (
self.consent_to_research is not None and any((
self.volume_email,
self.volume_sms,
self.volume_letter,
))
)
@property
def has_email_templates(self):
return len(self.get_templates('email')) > 0
@property
def has_sms_templates(self):
return len(self.get_templates('sms')) > 0
@property
def intending_to_send_email(self):
if self.volume_email is None:
return self.has_email_templates
return self.volume_email > 0
@property
def intending_to_send_sms(self):
if self.volume_sms is None:
return self.has_sms_templates
return self.volume_sms > 0
@cached_property
def email_reply_to_addresses(self):
return service_api_client.get_reply_to_email_addresses(self.id)
@property
def has_email_reply_to_address(self):
return bool(self.email_reply_to_addresses)
@property
def count_email_reply_to_addresses(self):
return len(self.email_reply_to_addresses)
@property
def default_email_reply_to_address(self):
return next(
(
x['email_address']
for x in self.email_reply_to_addresses if x['is_default']
), None
)
def get_email_reply_to_address(self, id):
return service_api_client.get_reply_to_email_address(self.id, id)
@property
def needs_to_add_email_reply_to_address(self):
return self.intending_to_send_email and not self.has_email_reply_to_address
@property
def shouldnt_use_govuk_as_sms_sender(self):
return self.organisation_type != Organisation.TYPE_CENTRAL
@cached_property
def sms_senders(self):
return service_api_client.get_sms_senders(self.id)
@property
def sms_senders_with_hints(self):
def attach_hint(sender):
hints = []
if sender['is_default']:
hints += ["default"]
if sender['inbound_number_id']:
hints += ["receives replies"]
if hints:
sender['hint'] = "(" + " and ".join(hints) + ")"
return sender
return [attach_hint(sender) for sender in self.sms_senders]
@property
def default_sms_sender(self):
return get_default_sms_sender(self.sms_senders)
@property
def count_sms_senders(self):
return len(self.sms_senders)
@property
def sms_sender_is_govuk(self):
return self.default_sms_sender in {'GOVUK', 'None'}
def get_sms_sender(self, id):
return service_api_client.get_sms_sender(self.id, id)
@property
def needs_to_change_sms_sender(self):
return all((
self.intending_to_send_sms,
self.shouldnt_use_govuk_as_sms_sender,
self.sms_sender_is_govuk,
))
@cached_property
def letter_contact_details(self):
return service_api_client.get_letter_contacts(self.id)
@property
def count_letter_contact_details(self):
return len(self.letter_contact_details)
@property
def default_letter_contact_block(self):
return next(
(
letter_contact_block
for letter_contact_block in self.letter_contact_details
if letter_contact_block['is_default']
), None
)
@property
def default_letter_contact_block_html(self):
# import in the function to prevent cyclical imports
from app import nl2br
if self.default_letter_contact_block:
return nl2br(self.default_letter_contact_block['contact_block'])
return ''
def edit_letter_contact_block(self, id, contact_block, is_default):
service_api_client.update_letter_contact(
self.id, letter_contact_id=id, contact_block=contact_block, is_default=is_default,
)
def remove_default_letter_contact_block(self):
if self.default_letter_contact_block:
self.edit_letter_contact_block(
self.default_letter_contact_block['id'],
self.default_letter_contact_block['contact_block'],
is_default=False,
)
def get_letter_contact_block(self, id):
return service_api_client.get_letter_contact(self.id, id)
@property
def volumes(self):
return sum(filter(None, (
self.volume_email,
self.volume_sms,
self.volume_letter,
)))
@property
def go_live_checklist_completed(self):
return all((
bool(self.volumes),
self.has_team_members,
self.has_templates,
not self.needs_to_add_email_reply_to_address,
not self.needs_to_change_sms_sender,
))
@property
def go_live_checklist_completed_as_yes_no(self):
return 'Yes' if self.go_live_checklist_completed else 'No'
@cached_property
def free_sms_fragment_limit(self):
return billing_api_client.get_free_sms_fragment_limit_for_year(self.id) or 0
@cached_property
def data_retention(self):
return service_api_client.get_service_data_retention(self.id)
def get_data_retention_item(self, id):
return next(
(dr for dr in self.data_retention if dr['id'] == id),
None
)
def get_days_of_retention(self, notification_type):
return next(
(dr for dr in self.data_retention if dr['notification_type'] == notification_type),
{}
).get('days_of_retention', current_app.config['ACTIVITY_STATS_LIMIT_DAYS'])
@property
def email_branding_id(self):
return self._dict['email_branding']
@cached_property
def email_branding(self):
if self.email_branding_id:
return email_branding_client.get_email_branding(self.email_branding_id)['email_branding']
return None
@cached_property
def email_branding_name(self):
if self.email_branding is None:
return 'GOV.UK'
return self.email_branding['name']
@cached_property
def letter_branding_name(self):
if self.letter_branding is None:
return 'no'
return self.letter_branding['name']
@property
def needs_to_change_email_branding(self):
return self.email_branding_id is None and self.organisation_type != Organisation.TYPE_CENTRAL
@property
def letter_branding_id(self):
return self._dict['letter_branding']
@cached_property
def letter_branding(self):
if self.letter_branding_id:
return letter_branding_client.get_letter_branding(self.letter_branding_id)
return None
@cached_property
def organisation(self):
return Organisation.from_id(self.organisation_id)
@property
def organisation_id(self):
return self._dict['organisation']
@property
def organisation_type(self):
return self.organisation.organisation_type or self._dict['organisation_type']
@property
def organisation_name(self):
if not self.organisation_id:
return None
return organisations_client.get_organisation_name(self.organisation_id)
@property
def organisation_type_label(self):
return dict(Organisation.TYPES).get(self.organisation_type)
@cached_property
def inbound_number(self):
return inbound_number_client.get_inbound_sms_number_for_service(self.id)['data'].get('number', '')
@property
def has_inbound_number(self):
return bool(self.inbound_number)
@cached_property
def inbound_sms_summary(self):
if not self.has_permission('inbound_sms'):
return None
return service_api_client.get_inbound_sms_summary(self.id)
@cached_property
def all_template_folders(self):
return sorted(
template_folder_api_client.get_template_folders(self.id),
key=lambda folder: folder['name'].lower(),
)
@cached_property
def all_template_folder_ids(self):
return {folder['id'] for folder in self.all_template_folders}
def get_user_template_folders(self, user):
"""Returns a modified list of folders a user has permission to view
For each folder, we do the following:
- if user has no permission to view the folder, skip it
- if folder is visible and its parent is visible, we add it to the list of folders
we later return without modifying anything
- if folder is visible, but the parent is not, we iterate through the parent until we
either find a visible parent or reach root folder. On each iteration we concatenate
invisible parent folder name to the front of our folder name, modifying the name, and we
change parent_folder_id attribute to a higher level parent. This flattens the path to the
folder making sure it displays in the closest visible parent.
"""
user_folders = []
for folder in self.all_template_folders:
if not user.has_template_folder_permission(folder, service=self):
continue
parent = self.get_template_folder(folder["parent_id"])
if user.has_template_folder_permission(parent, service=self):
user_folders.append(folder)
else:
folder_attrs = {
"id": folder["id"], "name": folder["name"], "parent_id": folder["parent_id"],
"users_with_permission": folder["users_with_permission"]
}
while folder_attrs["parent_id"] is not None:
folder_attrs["name"] = [
parent["name"],
folder_attrs["name"],
]
if parent["parent_id"] is None:
folder_attrs["parent_id"] = None
else:
parent = self.get_template_folder(parent["parent_id"])
folder_attrs["parent_id"] = parent.get("id", None)
if user.has_template_folder_permission(parent, service=self):
break
user_folders.append(folder_attrs)
return user_folders
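    # Worked example (illustrative folder names): if a user can see folder C but
    # not its parent B, and B's parent A is visible, then C is returned with
    # name == [B's name, C's name] and parent_id == A's id, so C is displayed
    # inside A with the invisible part of its path folded into its name.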
def get_template_folders(self, template_type='all', parent_folder_id=None, user=None):
if user:
folders = self.get_user_template_folders(user)
else:
folders = self.all_template_folders
if parent_folder_id:
parent_folder_id = str(parent_folder_id)
return [
folder for folder in folders
if (
folder['parent_id'] == parent_folder_id
and self.is_folder_visible(folder['id'], template_type, user)
)
]
def get_template_folder(self, folder_id):
if folder_id is None:
return {
'id': None,
'name': 'Templates',
'parent_id': None,
}
return self._get_by_id(self.all_template_folders, folder_id)
def is_folder_visible(self, template_folder_id, template_type='all', user=None):
if template_type == 'all':
return True
if self.get_templates(template_type, template_folder_id):
return True
if any(
self.is_folder_visible(child_folder['id'], template_type, user)
for child_folder in self.get_template_folders(template_type, template_folder_id, user)
):
return True
return False
def get_template_folder_path(self, template_folder_id):
folder = self.get_template_folder(template_folder_id)
if folder['id'] is None:
return [folder]
return self.get_template_folder_path(folder['parent_id']) + [
self.get_template_folder(folder['id'])
]
def get_template_path(self, template):
return self.get_template_folder_path(template['folder']) + [
template,
]
def get_template_folders_and_templates(self, template_type, template_folder_id):
return (
self.get_templates(template_type, template_folder_id)
+ self.get_template_folders(template_type, template_folder_id)
)
@property
def count_of_templates_and_folders(self):
return len(self.all_templates + self.all_template_folders)
def move_to_folder(self, ids_to_move, move_to):
ids_to_move = set(ids_to_move)
template_folder_api_client.move_to_folder(
service_id=self.id,
folder_id=move_to,
template_ids=ids_to_move & self.all_template_ids,
folder_ids=ids_to_move & self.all_template_folder_ids,
)
@cached_property
def api_keys(self):
return sorted(
api_key_api_client.get_api_keys(self.id)['apiKeys'],
key=lambda key: key['name'].lower(),
)
def get_api_key(self, id):
return self._get_by_id(self.api_keys, id)
@property
def able_to_accept_agreement(self):
return (
self.organisation.agreement_signed is not None
or self.organisation_type in {
Organisation.TYPE_NHS_GP,
Organisation.TYPE_NHS_LOCAL,
}
)
@cached_property
def returned_letter_statistics(self):
return service_api_client.get_returned_letter_statistics(self.id)
@cached_property
def returned_letter_summary(self):
return service_api_client.get_returned_letter_summary(self.id)
@property
def count_of_returned_letters_in_last_7_days(self):
return self.returned_letter_statistics['returned_letter_count']
@property
def date_of_most_recent_returned_letter_report(self):
return self.returned_letter_statistics['most_recent_report']
@property
def has_returned_letters(self):
return bool(self.date_of_most_recent_returned_letter_report)
@property
def contact_lists(self):
return ContactLists(self.id)
class Services(SerialisedModelCollection):
model = Service
|
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Installs and configures Swift
"""
import os
import re
import uuid
import netaddr
from packstack.installer import basedefs
from packstack.installer import validators
from packstack.installer import processors
from packstack.installer.exceptions import ParamValidationError
from packstack.installer import utils
from packstack.installer.utils import split_hosts
from packstack.modules.documentation import update_params_usage
from packstack.modules.ospluginutils import appendManifestFile
from packstack.modules.ospluginutils import createFirewallResources
from packstack.modules.ospluginutils import getManifestTemplate
from packstack.modules.ospluginutils import manifestfiles
# ------------- Swift Packstack Plugin Initialization --------------
PLUGIN_NAME = "OS-Swift"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
params = [
{"CMD_OPTION": "os-swift-ks-passwd",
"PROMPT": "Enter the password for the Swift Keystone access",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "PW_PLACEHOLDER",
"PROCESSORS": [processors.process_password],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_SWIFT_KS_PW",
"USE_DEFAULT": False,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "os-swift-storages",
"PROMPT": "Enter the Swift Storage devices e.g. /path/to/dev",
"OPTION_LIST": [],
"VALIDATORS": [validate_storage],
"DEFAULT_VALUE": '',
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SWIFT_STORAGES",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SWIFT_STORAGE_HOSTS']},
{"CMD_OPTION": "os-swift-storage-zones",
"PROMPT": ("Enter the number of swift storage zones, MUST be no "
"bigger than the number of storage devices configured"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_integer],
"DEFAULT_VALUE": "1",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SWIFT_STORAGE_ZONES",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-swift-storage-replicas",
"PROMPT": ("Enter the number of swift storage replicas, MUST be no "
"bigger than the number of storage zones configured"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_integer],
"DEFAULT_VALUE": "1",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SWIFT_STORAGE_REPLICAS",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-swift-storage-fstype",
"PROMPT": "Enter FileSystem type for storage nodes",
"OPTION_LIST": ['xfs', 'ext4'],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "ext4",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SWIFT_STORAGE_FSTYPE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-swift-hash",
"PROMPT": "Enter hash for Swift shared secret",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": uuid.uuid4().hex[:16],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_SWIFT_HASH",
"USE_DEFAULT": True,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "os-swift-storage-size",
"PROMPT": ("Enter the size of the storage device (eg. 2G, 2000M, "
"2000000K)"),
"OPTION_LIST": [],
"VALIDATORS": [validate_storage_size],
"DEFAULT_VALUE": "2G",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SWIFT_STORAGE_SIZE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
]
update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
group = {"GROUP_NAME": "OSSWIFT",
"DESCRIPTION": "OpenStack Swift Config parameters",
"PRE_CONDITION": "CONFIG_SWIFT_INSTALL",
"PRE_CONDITION_MATCH": "y",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
controller.addGroup(group, params)
def initSequences(controller):
if controller.CONF['CONFIG_SWIFT_INSTALL'] != 'y':
return
steps = [
{'title': 'Adding Swift Keystone manifest entries',
'functions': [create_keystone_manifest]},
{'title': 'Adding Swift builder manifest entries',
'functions': [create_builder_manifest]},
{'title': 'Adding Swift proxy manifest entries',
'functions': [create_proxy_manifest]},
{'title': 'Adding Swift storage manifest entries',
'functions': [create_storage_manifest]},
{'title': 'Adding Swift common manifest entries',
'functions': [create_common_manifest]},
]
controller.addSequence("Installing OpenStack Swift", [], [], steps)
# ------------------------- helper functions -------------------------
def validate_storage(param, options=None):
if not param:
return
if not param.startswith('/'):
raise ParamValidationError(
'Storage value has to be in format "/path/to/device".'
)
def validate_storage_size(param, options=None):
match = re.match(r'\d+G|\d+M|\d+K', param, re.IGNORECASE)
if not match:
        msg = ('Storage size does not have a valid value '
               '(eg. 1G, 1000M, 1000000K)')
raise ParamValidationError(msg)
def parse_devices(config):
"""
    Returns a list of dicts describing the Swift storage devices.
"""
devices = []
device_number = 0
num_zones = int(config["CONFIG_SWIFT_STORAGE_ZONES"])
for device in config["CONFIG_SWIFT_STORAGES"].split(","):
# we have to get rid of host part in case deprecated parameter
# CONFIG_SWIFT_STORAGE_HOSTS has been used
if ':' in device:
device = device.split(':')[1]
# device should be empty string in case only IP address has been used
try:
netaddr.IPAddress(device)
except Exception:
device = device.strip()
else:
device = ''
if not device:
continue
device_number += 1
zone = str((device_number % num_zones) + 1)
devices.append({'device': device, 'zone': zone,
'device_name': 'device%s' % device_number})
if not devices:
devices.append({'device': None, 'zone': 1,
'device_name': 'swiftloopback'})
return devices
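# For illustration (not part of the original module): with
# CONFIG_SWIFT_STORAGES='/dev/sdb,/dev/sdc' and CONFIG_SWIFT_STORAGE_ZONES='2',
# parse_devices returns
#     [{'device': '/dev/sdb', 'zone': '2', 'device_name': 'device1'},
#      {'device': '/dev/sdc', 'zone': '1', 'device_name': 'device2'}]
# and it falls back to a single 'swiftloopback' entry when no devices are given.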
def check_device(host, device):
"""
    Raises ScriptRuntimeError if the given device does not exist on the
    given host or is mounted somewhere other than /srv/node.
"""
server = utils.ScriptRunner(host)
# the device MUST exist
cmd = 'ls -l %s'
server.append(cmd % device)
# if it is not mounted then we can use it
cmd = 'grep "%s " /proc/self/mounts || exit 0'
server.append(cmd % device)
# if it is mounted then the mount point has to be in /srv/node
cmd = 'grep "%s /srv/node" /proc/self/mounts && exit 0'
server.append(cmd % device)
# if we got here without exiting then we can't use this device
server.append('exit 1')
server.execute()
def get_storage_size(config):
ranges = {'G': 1048576, 'M': 1024, 'K': 1}
size = config['CONFIG_SWIFT_STORAGE_SIZE'].strip()
for measure in ['G', 'M', 'K']:
        if re.match(r'\d+' + measure, size, re.IGNORECASE):
intsize = int(size.rstrip(measure)) * ranges[measure]
return intsize
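# For illustration: get_storage_size returns the configured size in kilobytes,
# e.g. '2G' -> 2097152; the result is later stored as CONFIG_SWIFT_STORAGE_SEEK
# when the loopback storage device is created.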
# -------------------------- step functions --------------------------
def create_keystone_manifest(config, messages):
# parse devices in first step
global devices
devices = parse_devices(config)
manifestfile = "%s_keystone.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("keystone_swift")
appendManifestFile(manifestfile, manifestdata)
def create_builder_manifest(config, messages):
global devices
# The ring file should be built and distributed before the storage services
# come up. Specifically the replicator crashes if the ring isn't present
def device_def(dev_type, host, dev_port, devicename, zone):
# device host has to be IP address
host = utils.force_ip(host)
fmt = ('\n@@%s { "%s:%s/%s":\n'
' zone => %s,\n'
' weight => 10, }\n')
return fmt % (dev_type, host, dev_port, devicename, zone)
manifestfile = "%s_ring_swift.pp" % config['CONFIG_STORAGE_HOST']
manifestdata = getManifestTemplate("swift_builder")
# Add each device to the ring
devicename = 0
for device in devices:
host = config['CONFIG_STORAGE_HOST_URL']
devicename = device['device_name']
zone = device['zone']
for dev_type, dev_port in [('ring_object_device', 6000),
('ring_container_device', 6001),
('ring_account_device', 6002)]:
manifestdata += device_def(dev_type, host, dev_port, devicename,
zone)
appendManifestFile(manifestfile, manifestdata, 'swiftbuilder')
def create_proxy_manifest(config, messages):
manifestfile = "%s_swift.pp" % config['CONFIG_STORAGE_HOST']
manifestdata = getManifestTemplate("swift_proxy")
fw_details = dict()
key = "swift_proxy"
fw_details.setdefault(key, {})
fw_details[key]['host'] = "ALL"
fw_details[key]['service_name'] = "swift proxy"
fw_details[key]['chain'] = "INPUT"
fw_details[key]['ports'] = ['8080']
fw_details[key]['proto'] = "tcp"
config['FIREWALL_SWIFT_PROXY_RULES'] = fw_details
manifestdata += createFirewallResources('FIREWALL_SWIFT_PROXY_RULES')
appendManifestFile(manifestfile, manifestdata)
def create_storage_manifest(config, messages):
global devices
manifestfile = "%s_swift.pp" % config['CONFIG_STORAGE_HOST']
manifestdata = getManifestTemplate("swift_storage")
    # this needs to happen once per storage device
for device in devices:
host = config['CONFIG_STORAGE_HOST']
devicename = device['device_name']
device = device['device']
fstype = config["CONFIG_SWIFT_STORAGE_FSTYPE"]
if device:
check_device(host, device)
manifestdata += ('\nswift::storage::%s { "%s":\n'
' device => "%s",\n}\n'
% (fstype, devicename, device))
else:
# create loopback device if none was specified
config['CONFIG_SWIFT_STORAGE_SEEK'] = get_storage_size(config)
manifestdata += "\n" + getManifestTemplate("swift_loopback")
# set allowed hosts for firewall
hosts = set([config['CONFIG_STORAGE_HOST']])
if config['CONFIG_NOVA_INSTALL'] == 'y':
hosts |= split_hosts(config['CONFIG_COMPUTE_HOSTS'])
fw_details = dict()
for host in hosts:
key = "swift_storage_and_rsync_%s" % host
fw_details.setdefault(key, {})
fw_details[key]['host'] = "%s" % host
fw_details[key]['service_name'] = "swift storage and rsync"
fw_details[key]['chain'] = "INPUT"
fw_details[key]['ports'] = ['6000', '6001', '6002', '873']
fw_details[key]['proto'] = "tcp"
config['FIREWALL_SWIFT_STORAGE_RULES'] = fw_details
manifestdata += createFirewallResources('FIREWALL_SWIFT_STORAGE_RULES')
appendManifestFile(manifestfile, manifestdata)
def create_common_manifest(config, messages):
for manifestfile, marker in manifestfiles.getFiles():
if manifestfile.endswith("_swift.pp"):
data = getManifestTemplate("swift_common")
appendManifestFile(os.path.split(manifestfile)[1], data)
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Interact with AWS S3, using the boto3 library.
"""
import fnmatch
import io
import re
from functools import wraps
from urllib.parse import urlparse
from botocore.exceptions import ClientError
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.exceptions import AirflowException
def provide_bucket_name(func):
"""
Function decorator that provides a bucket name taken from the connection
    in case no bucket name has been passed to the function and, where the
    function accepts one, no key (or wildcard key) has been passed either.
"""
@wraps(func)
def wrapper(*args, **kwargs):
func_params = func.__code__.co_varnames
def has_arg(name):
name_in_args = name in func_params and func_params.index(name) < len(args)
name_in_kwargs = name in kwargs
return name_in_args or name_in_kwargs
if not has_arg('bucket_name') and not (has_arg('key') or has_arg('wildcard_key')):
self = args[0]
connection = self.get_connection(self.aws_conn_id)
kwargs['bucket_name'] = connection.schema
return func(*args, **kwargs)
return wrapper
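# Illustrative sketch (not part of the original module): with the decorator in
# place, a call that passes neither a bucket name nor a key falls back to the
# bucket stored in the Airflow connection's schema field, e.g.
#
#     hook = S3Hook(aws_conn_id='my_s3')   # connection schema = 'my-bucket'
#     hook.check_for_bucket()              # checks 'my-bucket'
#
# 'my_s3' and 'my-bucket' are made-up names used only for illustration.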
class S3Hook(AwsHook):
"""
Interact with AWS S3, using the boto3 library.
"""
def get_conn(self):
return self.get_client_type('s3')
@staticmethod
def parse_s3_url(s3url):
"""
Parses the S3 Url into a bucket name and key.
:param s3url: The S3 Url to parse.
        :type s3url: str
:return: the parsed bucket name and key
:rtype: tuple of str
"""
parsed_url = urlparse(s3url)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket_name instead of "{s3url}"'.format(s3url=s3url))
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return bucket_name, key
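    # Illustrative example (hypothetical URL):
    #
    #     S3Hook.parse_s3_url('s3://my-bucket/path/to/file.csv')
    #     # -> ('my-bucket', 'path/to/file.csv')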
@provide_bucket_name
def check_for_bucket(self, bucket_name=None):
"""
Check if bucket_name exists.
:param bucket_name: the name of the bucket
:type bucket_name: str
:return: True if it exists and False if not.
:rtype: bool
"""
try:
self.get_conn().head_bucket(Bucket=bucket_name)
return True
except ClientError as e:
self.log.info(e.response["Error"]["Message"])
return False
@provide_bucket_name
def get_bucket(self, bucket_name=None):
"""
Returns a boto3.S3.Bucket object
:param bucket_name: the name of the bucket
:type bucket_name: str
:return: the bucket object to the bucket name.
:rtype: boto3.S3.Bucket
"""
s3_resource = self.get_resource_type('s3')
return s3_resource.Bucket(bucket_name)
@provide_bucket_name
def create_bucket(self, bucket_name=None, region_name=None):
"""
Creates an Amazon S3 bucket.
:param bucket_name: The name of the bucket
:type bucket_name: str
:param region_name: The name of the aws region in which to create the bucket.
:type region_name: str
"""
s3_conn = self.get_conn()
if not region_name:
region_name = s3_conn.meta.region_name
if region_name == 'us-east-1':
self.get_conn().create_bucket(Bucket=bucket_name)
else:
self.get_conn().create_bucket(Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': region_name
})
@provide_bucket_name
def check_for_prefix(self, prefix, delimiter, bucket_name=None):
"""
Checks that a prefix exists in a bucket
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:return: False if the prefix does not exist in the bucket and True if it does.
:rtype: bool
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
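    # Illustrative example (hypothetical names): check_for_prefix('dir/sub', '/',
    # bucket_name='my-bucket') lists the common prefixes directly under 'dir/'
    # and returns True only if 'dir/sub/' is among them.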
@provide_bucket_name
def list_prefixes(self, bucket_name=None, prefix='', delimiter='',
page_size=None, max_items=None):
"""
Lists prefixes in a bucket under prefix
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
:return: a list of matched prefixes and None if there are none.
:rtype: list
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter,
PaginationConfig=config)
has_results = False
prefixes = []
for page in response:
if 'CommonPrefixes' in page:
has_results = True
for common_prefix in page['CommonPrefixes']:
prefixes.append(common_prefix['Prefix'])
if has_results:
return prefixes
return None
@provide_bucket_name
def list_keys(self, bucket_name=None, prefix='', delimiter='',
page_size=None, max_items=None):
"""
Lists keys in a bucket under prefix and not containing delimiter
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
:return: a list of matched keys and None if there are none.
:rtype: list
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter,
PaginationConfig=config)
has_results = False
keys = []
for page in response:
if 'Contents' in page:
has_results = True
for k in page['Contents']:
keys.append(k['Key'])
if has_results:
return keys
return None
@provide_bucket_name
def check_for_key(self, key, bucket_name=None):
"""
Checks if a key exists in a bucket
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:return: True if the key exists and False if not.
:rtype: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
try:
self.get_conn().head_object(Bucket=bucket_name, Key=key)
return True
except ClientError as e:
self.log.info(e.response["Error"]["Message"])
return False
@provide_bucket_name
def get_key(self, key, bucket_name=None):
"""
Returns a boto3.s3.Object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
:return: the key object from the bucket
:rtype: boto3.s3.Object
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
obj = self.get_resource_type('s3').Object(bucket_name, key)
obj.load()
return obj
@provide_bucket_name
def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:return: the content of the key
        :rtype: str
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8')
@provide_bucket_name
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event)
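    # Illustrative usage (hypothetical bucket and key), relying on the CSV
    # defaults for input and output serialization:
    #
    #     hook.select_key('s3://my-bucket/data.csv',
    #                     expression='SELECT * FROM S3Object')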
@provide_bucket_name
def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
:param delimiter: the delimiter marks key hierarchy
:type delimiter: str
:return: True if a key exists and False if not.
:rtype: bool
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None
@provide_bucket_name
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
"""
Returns a boto3.s3.Object object matching the wildcard expression
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
:param delimiter: the delimiter marks key hierarchy
:type delimiter: str
:return: the key object from the bucket or None if none has been found.
:rtype: boto3.s3.Object
"""
if not bucket_name:
(bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
prefix = re.split(r'[*]', wildcard_key, 1)[0]
key_list = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
if key_list:
key_matches = [k for k in key_list if fnmatch.fnmatch(k, wildcard_key)]
if key_matches:
return self.get_key(key_matches[0], bucket_name)
return None
@provide_bucket_name
def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
@provide_bucket_name
def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
"""
Loads a string to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param string_data: str to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
:param encoding: The string to byte encoding
:type encoding: str
"""
bytes_data = string_data.encode(encoding)
file_obj = io.BytesIO(bytes_data)
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt)
@provide_bucket_name
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
        This is provided as a convenience to drop bytes in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
file_obj = io.BytesIO(bytes_data)
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt)
@provide_bucket_name
def load_file_obj(self,
file_obj,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a file object to S3
:param file_obj: The file-like object to set as the content for the S3 key.
:type file_obj: file-like object
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag that indicates whether to overwrite the key
if it already exists.
:type replace: bool
:param encrypt: If True, S3 encrypts the file on the server,
and the file is stored in encrypted form at rest in S3.
:type encrypt: bool
"""
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt)
def _upload_file_obj(self,
file_obj,
key,
bucket_name=None,
replace=False,
encrypt=False):
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
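    # Illustrative usage of the load_* helpers (hypothetical bucket and keys):
    #
    #     hook.load_string('hello', key='greetings/hello.txt',
    #                      bucket_name='my-bucket', replace=True)
    #     hook.load_bytes(b'\x00\x01', key='blobs/raw.bin',
    #                     bucket_name='my-bucket')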
def copy_object(self,
source_bucket_key,
dest_bucket_key,
source_bucket_name=None,
dest_bucket_name=None,
source_version_id=None):
"""
Creates a copy of an object that is already stored in S3.
Note: the S3 connection used here needs to have access to both
source and destination bucket/key.
:param source_bucket_key: The key of the source object.
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit source_bucket_name.
:type source_bucket_key: str
:param dest_bucket_key: The key of the object to copy to.
The convention to specify `dest_bucket_key` is the same
as `source_bucket_key`.
:type dest_bucket_key: str
:param source_bucket_name: Name of the S3 bucket where the source object is in.
It should be omitted when `source_bucket_key` is provided as a full s3:// url.
:type source_bucket_name: str
:param dest_bucket_name: Name of the S3 bucket to where the object is copied.
It should be omitted when `dest_bucket_key` is provided as a full s3:// url.
:type dest_bucket_name: str
:param source_version_id: Version ID of the source object (OPTIONAL)
:type source_version_id: str
"""
if dest_bucket_name is None:
dest_bucket_name, dest_bucket_key = self.parse_s3_url(dest_bucket_key)
else:
parsed_url = urlparse(dest_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If dest_bucket_name is provided, ' +
'dest_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
if source_bucket_name is None:
source_bucket_name, source_bucket_key = self.parse_s3_url(source_bucket_key)
else:
parsed_url = urlparse(source_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If source_bucket_name is provided, ' +
'source_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
copy_source = {'Bucket': source_bucket_name,
'Key': source_bucket_key,
'VersionId': source_version_id}
response = self.get_conn().copy_object(Bucket=dest_bucket_name,
Key=dest_bucket_key,
CopySource=copy_source)
return response
def delete_objects(self, bucket, keys):
"""
Delete keys from the bucket.
:param bucket: Name of the bucket in which you are going to delete object(s)
:type bucket: str
:param keys: The key(s) to delete from S3 bucket.
When ``keys`` is a string, it is treated as the key name of
the single object to delete.
When ``keys`` is a list, it is treated as the list of
keys to delete.
:type keys: str or list
"""
keys = keys if isinstance(keys, list) else [keys]
delete_dict = {"Objects": [{"Key": k} for k in keys]}
response = self.get_conn().delete_objects(Bucket=bucket, Delete=delete_dict)
return response
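# Hedged usage sketch, assuming this class is exposed as Airflow's S3Hook and
# that an AWS connection named "aws_default" exists; the bucket and key names
# below are illustrative only.
#
#     hook = S3Hook(aws_conn_id="aws_default")
#     hook.load_bytes(b"hello world", key="s3://my-bucket/greeting.txt",
#                     replace=True, encrypt=True)
#     hook.copy_object("s3://my-bucket/greeting.txt",
#                      "s3://my-other-bucket/greeting.txt")
#     hook.delete_objects(bucket="my-bucket", keys="greeting.txt")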
|
|
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
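# Hedged illustration (not collected as a test): the closed-form PA-I step used
# by the reference implementation above, tau = min(C, loss / ||x||^2), applied
# to a single hand-made sample. The numbers are made up for demonstration.
def _demo_pa_hinge_step(C=1.0):
    x = np.array([1.0, 2.0])
    y_i = 1.0
    w = np.zeros_like(x)
    p = np.dot(w, x)                    # current margin is 0
    loss = max(1 - y_i * p, 0)          # hinge loss is 1
    tau = min(C, loss / np.dot(x, x))   # PA-I step: min(1, 1/5) = 0.2
    return w + tau * y_i * x            # updated weights: [0.2, 0.4]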
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
clf = PassiveAggressiveClassifier(
C=1.0, max_iter=30, fit_intercept=fit_intercept,
random_state=1, average=average, tol=None)
clf.fit(data, y)
score = clf.score(data, y)
assert score > 0.79
if average:
assert hasattr(clf, '_average_coef')
assert hasattr(clf, '_average_intercept')
assert hasattr(clf, '_standard_intercept')
assert hasattr(clf, '_standard_coef')
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
for average in (False, True):
clf = PassiveAggressiveClassifier(random_state=0,
average=average,
max_iter=5)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert score > 0.79
if average:
assert hasattr(clf, '_average_coef')
assert hasattr(clf, '_average_intercept')
assert hasattr(clf, '_standard_intercept')
assert hasattr(clf, '_standard_coef')
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier(max_iter=5).fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
@pytest.mark.parametrize('loss', ("hinge", "squared_hinge"))
def test_classifier_correctness(loss):
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPassiveAggressive(loss=loss, n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(loss=loss, max_iter=2,
shuffle=False, tol=None)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier(max_iter=100)
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, max_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = PassiveAggressiveClassifier(C=0.1, max_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clockwise and
# the prediction for this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100)
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(
C=0.1, max_iter=1000, tol=None, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(
C=0.1, max_iter=1000, tol=None, class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(
C=0.1, max_iter=1000, tol=None, class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100)
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5], max_iter=100)
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch", max_iter=100)
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
reg = PassiveAggressiveRegressor(
C=1.0, fit_intercept=fit_intercept,
random_state=0, average=average, max_iter=5)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert np.mean((pred - y_bin) ** 2) < 1.7
if average:
assert hasattr(reg, '_average_coef')
assert hasattr(reg, '_average_intercept')
assert hasattr(reg, '_standard_intercept')
assert hasattr(reg, '_standard_coef')
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for average in (False, True):
reg = PassiveAggressiveRegressor(random_state=0,
average=average, max_iter=100)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert np.mean((pred - y_bin) ** 2) < 1.7
if average:
assert hasattr(reg, '_average_coef')
assert hasattr(reg, '_average_intercept')
assert hasattr(reg, '_standard_intercept')
assert hasattr(reg, '_standard_coef')
@pytest.mark.parametrize(
'loss',
("epsilon_insensitive", "squared_epsilon_insensitive"))
def test_regressor_correctness(loss):
y_bin = y.copy()
y_bin[y != 1] = -1
reg1 = MyPassiveAggressive(loss=loss, n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(tol=None, loss=loss, max_iter=2,
shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor(max_iter=100)
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
# TODO: remove in 0.25
@pytest.mark.parametrize('klass', [PassiveAggressiveClassifier,
PassiveAggressiveRegressor])
def test_passive_aggressive_deprecated_attr(klass):
est = klass(average=True)
est.fit(X, y)
msg = "Attribute {} was deprecated"
for att in ['average_coef_', 'average_intercept_',
'standard_coef_', 'standard_intercept_']:
with pytest.warns(FutureWarning, match=msg.format(att)):
getattr(est, att)
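# Hedged quick-start sketch (not a test): fitting the estimator under test on
# the shuffled iris data defined at the top of this module.
def _demo_quickstart():
    clf = PassiveAggressiveClassifier(C=1.0, max_iter=100, tol=1e-3,
                                      random_state=0)
    clf.fit(X, y)
    return clf.score(X, y)  # expected to be well above chance on iris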
|
|
"""
This module defines a basis of operators for the Standard Model
effective Lagrangian up to dimension 6.
The basis is the one in arXiv:1412.8480v2_.
.. _arXiv:1412.8480v2: https://arxiv.org/abs/1412.8480v2
"""
from fractions import Fraction
from matchingtools.core import (
tensor_op, flavor_tensor_op, D, Op, OpSum, i_op, number_op)
from matchingtools.extras.SM import (
phi, phic, lL, lLc, eR, eRc, qL, qLc, uR, uRc, dR, dRc, bFS, wFS, gFS,
ye, yec, yd, ydc, yu, yuc, V, Vc, mu2phi, lambdaphi)
from matchingtools.extras.SU2 import epsSU2, sigmaSU2, epsSU2triplets
from matchingtools.extras.SU3 import epsSU3, TSU3, fSU3
from matchingtools.extras.Lorentz import (
epsUp, epsUpDot, epsDown, epsDownDot, sigma4, sigma4bar,
eps4, sigmaTensor)
# -- Standard Model dimension 4 operators --
Okinphi = tensor_op("Okinphi")
r"""
Higgs kinetic term
:math:`\mathcal{O}_{kin,\phi} = (D_\mu \phi)^\dagger D^\mu \phi`.
"""
Ophi4 = tensor_op("Ophi4")
r"""
Higgs quartic coupling
:math:`\mathcal{O}_{\phi 4} = (\phi^\dagger\phi)^2`.
"""
Ophi2 = tensor_op("Ophi2")
r"""
Higgs quadratic coupling
:math:`\mathcal{O}_{\phi 2} = \phi^\dagger\phi`.
"""
Oye = flavor_tensor_op("Oye")
r"""
Lepton Yukawa coupling
:math:`(\mathcal{O}_{y^e})_{ij} = \bar{l}_{Li}\phi e_{Rj}`.
"""
Oyec = flavor_tensor_op("Oyec")
r"""
Conjugate lepton Yukawa coupling
:math:`(\mathcal{O}_{y^e})^*_{ij} = \bar{e}_{Rj}\phi^\dagger l_{Li}`.
"""
Oyd = flavor_tensor_op("Oyd")
r"""
Down quark Yukawa coupling
:math:`(\mathcal{O}_{y^d})_{ij} = \bar{q}_{Li}\phi d_{Rj}`.
"""
Oydc = flavor_tensor_op("Oydc")
r"""
Conjugate down quark Yukawa coupling
:math:`(\mathcal{O}_{y^d})^*_{ij} = \bar{d}_{Rj}\phi^\dagger q_{Li}`.
"""
Oyu = flavor_tensor_op("Oyu")
r"""
Up quark Yukawa coupling
:math:`(\mathcal{O}_{y^u})_{ij} = \bar{q}_{Li}\tilde{\phi} u_{Rj}`.
"""
Oyuc = flavor_tensor_op("Oyuc")
r"""
Conjugate up quark Yukawa coupling
:math:`(\mathcal{O}_{y^u})^*_{ij} =
\bar{u}_{Rj}\tilde{\phi}^\dagger q_{Li}`.
"""
# -- Standard Model dimension 5 operators --
O5 = flavor_tensor_op("O5")
r"""
Weinberg operator
:math:`\mathcal{O}_5 =
\overline{l^c}_L\tilde{\phi}^*\tilde{\phi}^\dagger l_L`.
"""
O5c = flavor_tensor_op("O5c")
r"""
Conjugate Weinberg operator
:math:`\mathcal{O}_5^* =
\bar{l}_L\tilde{\phi}\tilde{\phi}^T l^c_L`.
"""
# -- Standard Model dimension 6 four-fermion operators --
# *** LLLL ***
O1ll = flavor_tensor_op("O1ll")
r"""
LLLL type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{ll})_{ijkl}=
\frac{1}{2}(\bar{l}_{Li}\gamma_\mu l_{Lj})
(\bar{l}_{Lk}\gamma^\mu l_{Ll})`.
"""
O1qq = flavor_tensor_op("O1qq")
r"""
LLLL type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{qq})_{ijkl}=
\frac{1}{2}(\bar{q}_{Li}\gamma_\mu q_{Lj})
(\bar{q}_{Lk}\gamma^\mu q_{Ll})`.
"""
O8qq = flavor_tensor_op("O8qq")
r"""
LLLL type four-fermion operator
:math:`(\mathcal{O}^{(8)}_{qq})_{ijkl}=
\frac{1}{2}(\bar{q}_{Li}T_A \gamma_\mu q_{Lj})
(\bar{q}_{Lk}T_A \gamma^\mu q_{Ll})`.
"""
O1lq = flavor_tensor_op("O1lq")
r"""
LLLL type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{lq})_{ijkl}=
(\bar{l}_{Li}\gamma_\mu l_{Lj})
(\bar{q}_{Lk}\gamma^\mu q_{Ll})`.
"""
O3lq = flavor_tensor_op("O3lq")
r"""
LLLL type four-fermion operator
:math:`(\mathcal{O}^{(3)}_{lq})_{ijkl}=
(\bar{l}_{Li}\sigma_a \gamma_\mu l_{Lj})
(\bar{q}_{Lk}\sigma_a \gamma^\mu q_{Ll})`.
"""
# *** RRRR ***
Oee = flavor_tensor_op("Oee")
r"""
RRRR type four-fermion operator
:math:`(\mathcal{O}_{ee})_{ijkl}=
\frac{1}{2}(\bar{e}_{Ri}\gamma_\mu e_{Rj})
(\bar{e}_{Rk}\gamma^\mu e_{Rl})`.
"""
O1uu = flavor_tensor_op("O1uu")
r"""
RRRR type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{uu})_{ijkl}=
\frac{1}{2}(\bar{u}_{Ri}\gamma_\mu u_{Rj})
(\bar{u}_{Rk}\gamma^\mu u_{Rl})`.
"""
O1dd = flavor_tensor_op("O1dd")
r"""
RRRR type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{dd})_{ijkl}=
\frac{1}{2}(\bar{d}_{Ri}\gamma_\mu d_{Rj})
(\bar{d}_{Rk}\gamma^\mu d_{Rl})`.
"""
O1ud = flavor_tensor_op("O1ud")
r"""
RRRR type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{ud})_{ijkl}=
(\bar{u}_{Ri}\gamma_\mu u_{Rj})
(\bar{d}_{Rk}\gamma^\mu d_{Rl})`.
"""
O8ud = flavor_tensor_op("O8ud")
r"""
RRRR type four-fermion operator
:math:`(\mathcal{O}^{(8)}_{ud})_{ijkl}=
(\bar{u}_{Ri}T_A \gamma_\mu u_{Rj})
(\bar{d}_{Rk}T_A \gamma^\mu d_{Rl})`.
"""
Oeu = flavor_tensor_op("Oeu")
r"""
RRRR type four-fermion operator
:math:`(\mathcal{O}_{eu})_{ijkl}=
(\bar{e}_{Ri}\gamma_\mu e_{Rj})
(\bar{u}_{Rk}\gamma^\mu u_{Rl})`.
"""
Oed = flavor_tensor_op("Oed")
r"""
RRRR type four-fermion operator
:math:`(\mathcal{O}_{ed})_{ijkl}=
(\bar{e}_{Ri}\gamma_\mu e_{Rj})
(\bar{d}_{Rk}\gamma^\mu d_{Rl})`.
"""
# *** LLRR and LRRL ***
Ole = flavor_tensor_op("Ole")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}_{le})_{ijkl}=
(\bar{l}_{Li}\gamma_\mu l_{Lj})
(\bar{e}_{Rk}\gamma^\mu e_{Rl})`.
"""
Oqe = flavor_tensor_op("Oqe")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}_{qe})_{ijkl}=
(\bar{q}_{Li}\gamma_\mu q_{Lj})
(\bar{e}_{Rk}\gamma^\mu e_{Rl})`.
"""
Olu = flavor_tensor_op("Olu")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}_{lu})_{ijkl}=
(\bar{l}_{Li}\gamma_\mu l_{Lj})
(\bar{u}_{Rk}\gamma^\mu u_{Rl})`.
"""
Old = flavor_tensor_op("Old")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}_{ld})_{ijkl}=
(\bar{l}_{Li}\gamma_\mu l_{Lj})
(\bar{d}_{Rk}\gamma^\mu d_{Rl})`.
"""
O1qu = flavor_tensor_op("O1qu")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{qu})_{ijkl}=
(\bar{q}_{Li}\gamma_\mu q_{Lj})
(\bar{u}_{Rk}\gamma^\mu u_{Rl})`.
"""
O8qu = flavor_tensor_op("O8qu")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}^{(8)}_{qu})_{ijkl}=
(\bar{q}_{Li}T_A\gamma_\mu q_{Lj})
(\bar{u}_{Rk}T_A\gamma^\mu u_{Rl})`.
"""
O1qd = flavor_tensor_op("O1qd")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{qd})_{ijkl}=
(\bar{q}_{Li}\gamma_\mu q_{Lj})
(\bar{d}_{Rk}\gamma^\mu d_{Rl})`.
"""
O8qd = flavor_tensor_op("O8qd")
r"""
LLRR type four-fermion operator
:math:`(\mathcal{O}^{(8)}_{qd})_{ijkl}=
(\bar{q}_{Li}T_A\gamma_\mu q_{Lj})
(\bar{d}_{Rk}T_A\gamma^\mu d_{Rl})`.
"""
Oledq = flavor_tensor_op("Oledq")
r"""
LRRL type four-fermion operator
:math:`(\mathcal{O}_{ledq})_{ijkl}=
(\bar{l}_{Li} e_{Rj})
(\bar{d}_{Rk} q_{Ll})`.
"""
Oledqc = flavor_tensor_op("Oledqc")
r"""
LRRL type four-fermion operator
:math:`(\mathcal{O}_{ledq})^*_{ijkl}=
(\bar{e}_{Rj} l_{Li})
(\bar{q}_{Ll} d_{Rk})`.
"""
# *** LRLR ***
O1qud = flavor_tensor_op("O1qud")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{qud})_{ijkl}=
(\bar{q}_{Li} u_{Rj})i\sigma_2
(\bar{q}_{Lk} d_{Rl})^T`.
"""
O1qudc = flavor_tensor_op("O1qudc")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}^{(1)}_{qud})^*_{ijkl}=
(\bar{u}_{Rj} q_{Li})i\sigma_2
(\bar{d}_{Rl} q_{Lk})^T`.
"""
O8qud = flavor_tensor_op("O8qud")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}^{(8)}_{qud})_{ijkl}=
(\bar{q}_{Li}T_A u_{Rj})i\sigma_2
(\bar{q}_{Lk}T_A d_{Rl})^T`.
"""
O8qudc = flavor_tensor_op("O8qudc")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}^{(8)}_{qud})^*_{ijkl}=
(\bar{u}_{Rj} T_A q_{Li})i\sigma_2
(\bar{d}_{Rl} T_A q_{Lk})^T`.
"""
Olequ = flavor_tensor_op("Olequ")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}_{lequ})_{ijkl}=
(\bar{l}_{Li} e_{Rj})i\sigma_2
(\bar{q}_{Lk} u_{Rl})^T`.
"""
Olequc = flavor_tensor_op("Olequc")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}_{lequ})^*_{ijkl}=
(\bar{e}_{Rj} l_{Li})i\sigma_2
(\bar{u}_{Rl} q_{Lk})^T`.
"""
Oluqe = flavor_tensor_op("Oluqe")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}_{luqe})_{ijkl}=
(\bar{l}_{Li} u_{Rj})i\sigma_2
(\bar{q}_{Lk} e_{Rl})^T`.
"""
Oluqec = flavor_tensor_op("Oluqec")
r"""
LRLR type four-fermion operator
:math:`(\mathcal{O}_{luqe})^*_{ijkl}=
(\bar{u}_{Rj} l_{Li})i\sigma_2
(\bar{e}_{Rl} q_{Lk})^T`.
"""
# *** \slashed{B} and \slashed{L} ***
Olqdu = flavor_tensor_op("Olqdu")
r"""
Four-fermion operator
:math:`(\mathcal{O}_{lqdu})_{ijkl}=
\epsilon_{ABC}(\bar{l}_{Li} i\sigma_2 q^{c,A}_{Lj})
(\bar{d}^B_{Rk} u^{c,C}_{Rl})`.
"""
Olqduc = flavor_tensor_op("Olqduc")
r"""
Four-fermion operator
:math:`(\mathcal{O}_{lqdu})^*_{ijkl}=
-\epsilon_{ABC}(\bar{q}^{c,A}_{Lj}i\sigma_2 l_{Li})
(\bar{u}^{c,C}_{Rl} d^B_{Rk})`.
"""
Oqqeu = flavor_tensor_op("Oqqeu")
r"""
Four-fermion operator
:math:`(\mathcal{O}_{qqeu})_{ijkl}=
\epsilon_{ABC}(\bar{q}^A_{Li} i\sigma_2 q^{c,B}_{Lj})
(\bar{e}_{Rk} u^{c,C}_{Rl})`.
"""
Oqqeuc = flavor_tensor_op("Oqqeuc")
r"""
Four-fermion operator
:math:`(\mathcal{O}_{qqeu})^*_{ijkl}=
-\epsilon_{ABC}(\bar{q}^{c,B}_{Lj}i\sigma_2 q^A_{Li})
(\bar{u}^{c,C}_{Rl} e_{Rk})`.
"""
O1lqqq = flavor_tensor_op("O1lqqq")
r"""
Four-fermion operator
:math:`(\mathcal{O}^{{(1)}}_{lqqq})_{ijkl}=
\epsilon_{ABC}(\bar{l}_{Li} i\sigma_2 q^{c,A}_{Lj})
(\bar{q}^B_{Lk} i\sigma_2 q^{c,C}_{Ll})`.
"""
O1lqqqc = flavor_tensor_op("O1lqqqc")
r"""
Four-fermion operator
:math:`(\mathcal{O}^{{(1)}}_{lqqq})^*_{ijkl}=
\epsilon_{ABC}(\bar{q}^{c,A}_{Lj} i\sigma_2 l_{Li})
(\bar{q}^{c,C}_{Ll} i\sigma_2 q^B_{Lk})`.
"""
Oudeu = flavor_tensor_op("Oudeu")
r"""
Four-fermion operator
:math:`(\mathcal{O}_{udeu})_{ijkl}=
\epsilon_{ABC}(\bar{u}^A_{Ri} d^{c,B}_{Rj})
(\bar{e}_{Rk} u^{c,C}_{Rl})`.
"""
Oudeuc = flavor_tensor_op("Oudeuc")
r"""
Four-fermion operator
:math:`(\mathcal{O}_{udeu})^*_{ijkl}=
\epsilon_{ABC}(\bar{d}^{c,B}_{Rj} u^A_{Ri})
(\bar{u}^{c,C}_{Rl} e_{Rk})`.
"""
O3lqqq = flavor_tensor_op("O3lqqq")
r"""
Four-fermion operator
:math:`(\mathcal{O}^{{(3)}}_{lqqq})_{ijkl}=
\epsilon_{ABC}(\bar{l}_{Li} \sigma_a i\sigma_2 q^{c,A}_{Lj})
(\bar{q}^B_{Lk} \sigma_a i\sigma_2 q^{c,C}_{Ll})`.
"""
O3lqqqc = flavor_tensor_op("O3lqqqc")
r"""
Four-fermion operator
:math:`(\mathcal{O}^{{(3)}}_{lqqq})^*_{ijkl}=
\epsilon_{ABC}(\bar{q}^{c,A}_{Lj} i\sigma_2 \sigma_a l_{Li})
(\bar{q}^{c,C}_{Ll} i\sigma_2 \sigma_a q^B_{Lk})`.
"""
# -- Standard Model dimension six operators other than four-fermion --
# *** S ***
Ophisq = tensor_op("Ophisq")
r"""
S type operator
:math:`\mathcal{O}_{\phi\square}=\phi^\dagger\phi\square(\phi^\dagger\phi)`.
"""
Ophi = tensor_op("Ophi")
r"""
S type six Higgs interaction operator
:math:`\mathcal{O}_\phi = \frac{1}{3}(\phi^\dagger\phi)^3`.
"""
# *** SVF ***
O1phil = flavor_tensor_op("O1phil")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi l})_{ij}=
(\phi^\dagger i D_\mu \phi)(\bar{l}_{Li}\gamma^\mu l_{Lj})`.
"""
O1philc = flavor_tensor_op("O1philc")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi l})^*_{ij}=
(-i (D_\mu \phi)^\dagger \phi)(\bar{l}_{Lj}\gamma^\mu l_{Li})`.
"""
O1phiq = flavor_tensor_op("O1phiq")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi q})_{ij}=
(\phi^\dagger i D_\mu \phi)(\bar{q}_{Li}\gamma^\mu q_{Lj})`.
"""
O1phiqc = flavor_tensor_op("O1phiqc")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi q})^*_{ij}=
(-i (D_\mu \phi)^\dagger \phi)(\bar{q}_{Lj}\gamma^\mu q_{Li})`.
"""
O3phil = flavor_tensor_op("O3phil")
r"""
SVF type operator :math:`(\mathcal{O}^{(3)}_{\phi l})_{ij}=
(\phi^\dagger \sigma^a i D_\mu \phi)(\bar{l}_{Li}\sigma^a\gamma^\mu l_{Lj})`.
"""
O3philc = flavor_tensor_op("O3philc")
r"""
SVF type operator :math:`(\mathcal{O}^{(3)}_{\phi l})^*_{ij}=
(-i (D_\mu \phi)^\dagger \sigma^a \phi)(\bar{l}_{Lj}\sigma^a\gamma^\mu l_{Li})`.
"""
O3phiq = flavor_tensor_op("O3phiq")
r"""
SVF type operator :math:`(\mathcal{O}^{(3)}_{\phi q})_{ij}=
(\phi^\dagger \sigma^a i D_\mu \phi)(\bar{q}_{Li}\sigma^a\gamma^\mu q_{Lj})`.
"""
O3phiqc = flavor_tensor_op("O3phiqc")
r"""
SVF type operator :math:`(\mathcal{O}^{(3)}_{\phi q})^*_{ij}=
(-i (D_\mu \phi)^\dagger \sigma^a \phi)(\bar{q}_{Lj}\sigma^a\gamma^\mu q_{Li})`.
"""
O1phie = flavor_tensor_op("O1phie")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi e})_{ij}=
(\phi^\dagger i D_\mu \phi)(\bar{e}_{Ri}\gamma^\mu e_{Rj})`.
"""
O1phiec = flavor_tensor_op("O1phiec")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi e})^*_{ij}=
(-i (D_\mu \phi)^\dagger \phi)(\bar{e}_{Rj}\gamma^\mu e_{Ri})`.
"""
O1phid = flavor_tensor_op("O1phid")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi d})_{ij}=
(\phi^\dagger i D_\mu \phi)(\bar{d}_{Ri}\gamma^\mu d_{Rj})`.
"""
O1phidc = flavor_tensor_op("O1phidc")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi d})^*_{ij}=
(-i (D_\mu \phi)^\dagger \phi)(\bar{d}_{Rj}\gamma^\mu d_{Ri})`.
"""
O1phiu = flavor_tensor_op("O1phiu")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi u})_{ij}=
(\phi^\dagger i D_\mu \phi)(\bar{u}_{Ri}\gamma^\mu u_{Rj})`.
"""
O1phiuc = flavor_tensor_op("O1phiuc")
r"""
SVF type operator :math:`(\mathcal{O}^{(1)}_{\phi u})^*_{ij}=
(-i (D_\mu \phi)^\dagger \phi)(\bar{u}_{Rj}\gamma^\mu u_{Ri})`.
"""
Ophiud = flavor_tensor_op("Ophiud")
r"""
SVF type operator :math:`(\mathcal{O}_{\phi ud})_{ij}=
-(\tilde{\phi}^\dagger i D_\mu \phi)(\bar{u}_{Ri}\gamma^\mu d_{Rj})`.
"""
Ophiudc = flavor_tensor_op("Ophiudc")
r"""
SVF type operator :math:`(\mathcal{O}_{\phi ud})^*_{ij}=
(i (D_\mu \phi)^\dagger \tilde{\phi})(\bar{u}_{Rj}\gamma^\mu d_{Ri})`.
"""
# *** STF ***
OeB = flavor_tensor_op("OeB")
r"""
STF type operator :math:`(\mathcal{O}_{eB})_{ij}=
(\bar{l}_{Li}\sigma^{\mu\nu}e_{Rj})\phi B_{\mu\nu}`.
"""
OeBc = flavor_tensor_op("OeBc")
r"""
STF type operator :math:`(\mathcal{O}_{eB})^*_{ij}=
\phi^\dagger (\bar{e}_{Rj}\sigma^{\mu\nu}l_{Li}) B_{\mu\nu}`.
"""
OeW = flavor_tensor_op("OeW")
r"""
STF type operator :math:`(\mathcal{O}_{eW})_{ij}=
(\bar{l}_{Li}\sigma^{\mu\nu}e_{Rj})\sigma^a\phi W^a_{\mu\nu}`.
"""
OeWc = flavor_tensor_op("OeWc")
r"""
STF type operator :math:`(\mathcal{O}_{eW})^*_{ij}=
\phi^\dagger\sigma^a(\bar{e}_{Rj}\sigma^{\mu\nu}l_{Li}) W^a_{\mu\nu}`.
"""
OuB = flavor_tensor_op("OuB")
r"""
STF type operator :math:`(\mathcal{O}_{uB})_{ij}=
(\bar{q}_{Li}\sigma^{\mu\nu}u_{Rj})\tilde{\phi} B_{\mu\nu}`.
"""
OuBc = flavor_tensor_op("OuBc")
r"""
STF type operator :math:`(\mathcal{O}_{uB})^*_{ij}=
\tilde{\phi}^\dagger(\bar{u}_{Rj}\sigma^{\mu\nu}q_{Li}) B_{\mu\nu}`.
"""
OuW = flavor_tensor_op("OuW")
r"""
STF type operator :math:`(\mathcal{O}_{uW})_{ij}=
(\bar{q}_{Li}\sigma^{\mu\nu}u_{Rj})\sigma^a\tilde{\phi} W^a_{\mu\nu}`.
"""
OuWc = flavor_tensor_op("OuWc")
r"""
STF type operator :math:`(\mathcal{O}_{uW})^*_{ij}=
\tilde{\phi}^\dagger\sigma^a(\bar{u}_{Rj}\sigma^{\mu\nu}q_{Li}) W^a_{\mu\nu}`.
"""
OdB = flavor_tensor_op("OdB")
r"""
STF type operator :math:`(\mathcal{O}_{dB})_{ij}=
(\bar{q}_{Li}\sigma^{\mu\nu}d_{Rj})\phi B_{\mu\nu}`.
"""
OdBc = flavor_tensor_op("OdBc")
r"""
STF type operator :math:`(\mathcal{O}_{dB})^*_{ij}=
\phi^\dagger(\bar{d}_{Rj}\sigma^{\mu\nu}q_{Li}) B_{\mu\nu}`.
"""
OdW = flavor_tensor_op("OdW")
r"""
STF type operator :math:`(\mathcal{O}_{dW})_{ij}=
(\bar{q}_{Li}\sigma^{\mu\nu}d_{Rj})\sigma^a\phi W^a_{\mu\nu}`.
"""
OdWc = flavor_tensor_op("OdWc")
r"""
STF type operator :math:`(\mathcal{O}_{dW})^*_{ij}=
\phi^\dagger\sigma^a(\bar{d}_{Rj}\sigma^{\mu\nu}q_{Li}) W^a_{\mu\nu}`.
"""
OuG = flavor_tensor_op("OuG")
r"""
STF type operator :math:`(\mathcal{O}_{uG})_{ij}=
(\bar{q}_{Li}\sigma^{\mu\nu}T_A u_{Rj})\tilde{\phi} G^A_{\mu\nu}`.
"""
OuGc = flavor_tensor_op("OuGc")
r"""
STF type operator :math:`(\mathcal{O}_{uG})^*_{ij}=
\tilde{\phi}^\dagger(\bar{u}_{Rj}\sigma^{\mu\nu}T_A q_{Li}) G^A_{\mu\nu}`.
"""
OdG = flavor_tensor_op("OdG")
r"""
STF type operator :math:`(\mathcal{O}_{dG})_{ij}=
(\bar{q}_{Li}\sigma^{\mu\nu}T_A d_{Rj})\phi G^A_{\mu\nu}`.
"""
OdGc = flavor_tensor_op("OdGc")
r"""
STF type operator :math:`(\mathcal{O}_{dG})^*_{ij}=
\phi^\dagger(\bar{d}_{Rj}\sigma^{\mu\nu}T_A q_{Li}) G^A_{\mu\nu}`.
"""
# *** SF ***
Oephi = flavor_tensor_op("Oephi")
r"""
SF type operator :math:`(\mathcal{O}_{e\phi})_{ij}=
(\phi^\dagger\phi)(\bar{l}_{Li}\phi e_{Rj})`.
"""
Odphi = flavor_tensor_op("Odphi")
r"""
SF type operator :math:`(\mathcal{O}_{d\phi})_{ij}=
(\phi^\dagger\phi)(\bar{q}_{Li}\phi d_{Rj})`.
"""
Ouphi = flavor_tensor_op("Ouphi")
r"""
SF type operator :math:`(\mathcal{O}_{u\phi})_{ij}=
(\phi^\dagger\phi)(\bar{q}_{Li}\tilde{\phi} u_{Rj})`.
"""
Oephic = flavor_tensor_op("Oephic")
r"""
SF type operator :math:`(\mathcal{O}_{e\phi})^*_{ij}=
(\phi^\dagger\phi)(\bar{e}_{Rj}\phi^\dagger l_{Li})`.
"""
Odphic = flavor_tensor_op("Odphic")
r"""
SF type operator :math:`(\mathcal{O}_{d\phi})^*_{ij}=
(\phi^\dagger\phi)(\bar{d}_{Rj}\phi^\dagger q_{Li})`.
"""
Ouphic = flavor_tensor_op("Ouphic")
r"""
SF type operator :math:`(\mathcal{O}_{u\phi})^*_{ij}=
(\phi^\dagger\phi)(\bar{u}_{Rj}\tilde{\phi}^\dagger q_{Li})`.
"""
# *** Oblique ***
OphiD = tensor_op("OphiD")
r"""
Oblique operator :math:`\mathcal{O}_{\phi D}=(\phi^\dagger D_\mu \phi)((D^\mu\phi)^\dagger\phi)`.
"""
OphiB = tensor_op("OphiB")
r"""
Oblique operator
:math:`\mathcal{O}_{\phi B}=\phi^\dagger\phi B_{\mu\nu}B^{\mu\nu}`.
"""
OphiBTilde = tensor_op("OphiBTilde")
r"""
Oblique operator
:math:`\mathcal{O}_{\phi \tilde{B}}=
\phi^\dagger\phi \tilde{B}_{\mu\nu}B^{\mu\nu}`.
"""
OphiW = tensor_op("OphiW")
r"""
Oblique operator
:math:`\mathcal{O}_{\phi W}=
\phi^\dagger\phi W^a_{\mu\nu}W^{a,\mu\nu}`.
"""
OphiWTilde = tensor_op("OphiWTilde")
r"""
Oblique operator
:math:`\mathcal{O}_{\phi \tilde{W}}=
\phi^\dagger\phi \tilde{W}^a_{\mu\nu}W^{a,\mu\nu}`.
"""
OWB = tensor_op("OWB")
r"""
Oblique operator
:math:`\mathcal{O}_{W B}=
\phi^\dagger\sigma^a\phi W^a_{\mu\nu}B^{\mu\nu}`.
"""
OWBTilde = tensor_op("OWBTilde")
r"""
Oblique operator
:math:`\mathcal{O}_{\tilde{W} B}=
\phi^\dagger\sigma^a\phi \tilde{W}^a_{\mu\nu}B^{\mu\nu}`.
"""
OphiG = tensor_op("OphiG")
r"""
Oblique operator
:math:`\mathcal{O}_{\phi G}=
\phi^\dagger\phi G^A_{\mu\nu}G^{A,\mu\nu}`.
"""
OphiGTilde = tensor_op("OphiGTilde")
r"""
Oblique operator
:math:`\mathcal{O}_{\phi \tilde{G}}=
\phi^\dagger\phi \tilde{G}^A_{\mu\nu}G^{A,\mu\nu}`.
"""
# *** Gauge ***
OW = tensor_op("OW")
r"""
Gauge type operator
:math:`\mathcal{O}_W=
\varepsilon_{abc}W^{a,\nu}_\mu W^{b,\rho}_\nu W^{c,\mu}_\rho`.
"""
OWTilde = tensor_op("OWTilde")
r"""
Gauge type operator
:math:`\mathcal{O}_{\tilde{W}}=
\varepsilon_{abc}\tilde{W}^{a,\nu}_\mu
W^{b,\rho}_\nu W^{c,\mu}_\rho`.
"""
OG = tensor_op("OG")
r"""
Gauge type operator
:math:`\mathcal{O}_G=
f_{ABC}G^{A,\nu}_\mu G^{B,\rho}_\nu G^{C,\mu}_\rho`.
"""
OGTilde = tensor_op("OGTilde")
r"""
Gauge type operator
:math:`\mathcal{O}_{\tilde{G}}=
f_{ABC}\tilde{G}^{A,\nu}_\mu
G^{B,\rho}_\nu G^{C,\mu}_\rho`.
"""
# Auxiliary operators for intermediate calculations
O5aux = flavor_tensor_op("O5aux")
O5auxc = flavor_tensor_op("O5auxc")
Olqqqaux = flavor_tensor_op("Olqqqaux")
Olqqqauxc = flavor_tensor_op("Olqqqauxc")
rules_basis_defs_dim_6_5 = [
# Standard Model dimension 6 four-fermion operators
# LLLL type
(Op(lLc(0, 1, -1), sigma4bar(2, 0, 3), lL(3, 1, -2),
lLc(4, 5, -3), sigma4bar(2, 4, 6), lL(6, 5, -4)),
OpSum(number_op(2) * O1ll(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), sigma4bar(3, 0, 4), qL(4, 1, 2, -2),
qLc(5, 6, 7, -3), sigma4bar(3, 5, 8), qL(8, 6, 7, -4)),
OpSum(number_op(2) * O1qq(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), sigma4bar(3, 0, 4),
TSU3(5, 1, 6), qL(4, 6, 2, -2),
qLc(7, 8, 9, -3), sigma4bar(3, 7, 10),
TSU3(5, 8, 11), qL(10, 11, 9, -4)),
OpSum(number_op(2) * O8qq(-1, -2, -3, -4))),
(Op(lLc(0, 1, -1), sigma4bar(2, 0, 3), lL(3, 1, -2),
qLc(4, 5, 6, -3), sigma4bar(2, 4, 7), qL(7, 5, 6, -4)),
OpSum(O1lq(-1, -2, -3, -4))),
(Op(lLc(0, 1, -1), sigma4bar(2, 0, 3),
sigmaSU2(4, 1, 5), lL(3, 5, -2),
qLc(6, 7, 8, -3), sigma4bar(2, 6, 9),
sigmaSU2(4, 8, 10), qL(9, 7, 10, -4)),
OpSum(O3lq(-1, -2, -3, -4))),
# RRRR type
(Op(eRc(0, -1), sigma4(1, 0, 2), eR(2, -2),
eRc(3, -3), sigma4(1, 3, 4), eR(4, -4)),
OpSum(number_op(2) * Oee(-1, -2, -3, -4))),
(Op(uRc(0, 1, -1), sigma4(2, 0, 3), uR(3, 1, -2),
uRc(4, 5, -3), sigma4(2, 4, 6), uR(6, 5, -4)),
OpSum(number_op(2) * O1uu(-1, -2, -3, -4))),
(Op(dRc(0, 1, -1), sigma4(2, 0, 3), dR(3, 1, -2),
dRc(4, 5, -3), sigma4(2, 4, 6), dR(6, 5, -4)),
OpSum(number_op(2) * O1dd(-1, -2, -3, -4))),
(Op(uRc(0, 1, -1), sigma4(2, 0, 3), uR(3, 1, -2),
dRc(4, 5, -3), sigma4(2, 4, 6), dR(6, 5, -4)),
OpSum(O1ud(-1, -2, -3, -4))),
(Op(uRc(0, 1, -1), sigma4(2, 0, 3),
TSU3(4, 1, 5), uR(3, 5, -2),
dRc(6, 7, -3), sigma4(2, 6, 8),
TSU3(4, 7, 9), dR(8, 9, -4)),
OpSum(O8ud(-1, -2, -3, -4))),
(Op(eRc(0, -1), sigma4(2, 0, 3), eR(3, -2),
uRc(4, 5, -3), sigma4(2, 4, 6), uR(6, 5, -4)),
OpSum(Oeu(-1, -2, -3, -4))),
(Op(eRc(0, -1), sigma4(2, 0, 3), eR(3, -2),
dRc(4, 5, -3), sigma4(2, 4, 6), dR(6, 5, -4)),
OpSum(Oed(-1, -2, -3, -4))),
# LLRR and LRRL type
(Op(lLc(0, 1, -1), sigma4bar(2, 0, 3), lL(3, 1, -2),
eRc(4, -3), sigma4(2, 4, 5), eR(5, -4)),
OpSum(Ole(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), sigma4bar(3, 0, 4), qL(4, 1, 2, -2),
eRc(5, -3), sigma4(3, 5, 6), eR(6, -4)),
OpSum(Oqe(-1, -2, -3, -4))),
(Op(lLc(0, 1, -1), sigma4bar(2, 0, 3), lL(3, 1, -2),
uRc(4, 5, -3), sigma4(2, 4, 6), uR(6, 5, -4)),
OpSum(Olu(-1, -2, -3, -4))),
(Op(lLc(0, 1, -1), sigma4bar(2, 0, 3), lL(3, 1, -2),
dRc(4, 5, -3), sigma4(2, 4, 6), dR(6, 5, -4)),
OpSum(Old(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), sigma4bar(3, 0, 4), qL(4, 1, 2, -2),
uRc(5, 6, -3), sigma4(3, 5, 7), uR(7, 6, -4)),
OpSum(O1qu(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), sigma4bar(3, 0, 4),
TSU3(5, 1, 6), qL(4, 6, 2, -2),
uRc(7, 8, -3), sigma4(3, 7, 9),
TSU3(5, 8, 10), uR(9, 10, -4)),
OpSum(O8qu(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), sigma4bar(3, 0, 4), qL(4, 1, 2, -2),
dRc(5, 6, -3), sigma4(3, 5, 7), dR(7, 6, -4)),
OpSum(O1qd(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), sigma4bar(3, 0, 4),
TSU3(5, 1, 6), qL(4, 6, 2, -2),
dRc(7, 8, -3), sigma4(3, 7, 9),
TSU3(5, 8, 10), dR(9, 10, -4)),
OpSum(O8qd(-1, -2, -3, -4))),
(Op(lLc(0, 1, -1), eR(0, -2), dRc(2, 3, -3), qL(2, 3, 1, -4)),
OpSum(Oledq(-1, -2, -3, -4))),
(Op(eRc(0, -2), lL(0, 1, -1), qLc(2, 3, 1, -4), dR(2, 3, -3)),
OpSum(Oledqc(-1, -2, -3, -4))),
# LRLR type
(Op(qLc(0, 1, 2, -1), uR(0, 1, -2), epsSU2(2, 3),
qLc(4, 5, 3, -3), dR(4, 5, -4)),
OpSum(O1qud(-1, -2, -3, -4))),
(Op(uRc(0, 1, -2), qL(0, 1, 2, -1), epsSU2(2, 3),
dRc(4, 5, -4), qL(4, 5, 3, -3)),
OpSum(O1qudc(-1, -2, -3, -4))),
(Op(qLc(0, 1, 2, -1), TSU3(3, 1, 4), uR(0, 4, -2),
epsSU2(2, 5),
qLc(6, 7, 5, -3), TSU3(3, 7, 8), dR(0, 8, -4)),
OpSum(O8qud(-1, -2, -3, -4))),
(Op(uRc(0, 4, -2), TSU3(3, 4, 1), qLc(0, 1, 2, -1),
epsSU2(2, 5),
dRc(0, 8, -4), TSU3(3, 8, 7), qL(6, 7, 5, -3)),
OpSum(O8qudc(-1, -2, -3, -4))),
(Op(lLc(0, 1, -1), eR(0, -2), epsSU2(1, 2),
qLc(3, 4, 2, -3), uR(3, 4, -4)),
OpSum(Olequ(-1, -2, -3, -4))),
(Op(eRc(0, -2), lL(0, 1, -1), epsSU2(1, 2),
uRc(3, 4, -4), qL(3, 4, 2, -3)),
OpSum(Olequc(-1, -2, -3, -4))),
(Op(lLc(0, 1, -1), uR(0, 2, -2), epsSU2(1, 3),
qLc(4, 2, 3, -3), eR(4, -4)),
OpSum(Oluqe(-1, -2, -3, -4))),
(Op(uRc(0, 2, -2), lL(0, 1, -1), epsSU2(1, 3),
eRc(4, -4), qLc(4, 2, 3, -3)),
OpSum(Oluqec(-1, -2, -3, -4))),
# \slashed{B} and \slashed{L} type
(Op(epsSU3(0, 1, 2), lLc(3, 4, -1), epsSU2(4, 5),
epsUpDot(3, 6), qLc(6, 0, 5, -2),
dRc(7, 1, -3), epsDown(7, 8), uRc(8, 2, -4)),
OpSum(Olqdu(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), qL(6, 0, 5, -2), epsSU2(5, 4),
epsUp(3, 6), lL(3, 4, -1),
uR(8, 2, -4), epsDownDot(8, 7), dR(7, 1, -3)),
OpSum(Olqduc(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), qLc(3, 1, 4, -1), epsSU2(4, 5),
epsUpDot(3, 6), qLc(6, 2, 5, -2),
eRc(7, -3), epsDown(7, 8), uRc(8, 0, -4)),
OpSum(Oqqeu(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), qL(6, 1, 5, -2), epsSU2(4, 5),
epsUp(6, 3), qL(3, 0, 4, -1),
uR(8, 2, -4), epsDownDot(8, 7), eR(7, -3)),
OpSum(Oqqeuc(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), lLc(3, 4, -1), epsSU2(4, 5),
epsUpDot(3, 6), qLc(6, 0, 5, -2),
qLc(7, 1, 8, -3), epsSU2(8, 9),
epsUpDot(7, 10), qLc(10, 2, 9, -4)),
OpSum(O1lqqq(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), qL(6, 0, 5, -2), epsSU2(4, 5),
epsUp(6, 4), lL(3, 4, -1),
qL(10, 2, 9, -4), epsSU2(8, 9),
epsUp(10, 7), qL(7, 1, 8, -3)),
OpSum(O1lqqqc(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), uRc(3, 0, -1), epsDown(3, 4),
dRc(4, 1, -2), eRc(5, -3), epsDown(5, 6),
uRc(6, 2, -4)),
OpSum(Oudeu(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), dR(4, 1, -2), epsDownDot(4, 3),
uR(3, 0, -1), uR(6, 2, -4), epsDownDot(6, 5),
eR(5, -3)),
OpSum(Oudeuc(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), lLc(3, 4, -1), sigmaSU2(5, 4, 6),
epsSU2(6, 7), epsUpDot(3, 8), qLc(8, 0, 7, -2),
qLc(9, 1, 10, -3), sigmaSU2(5, 10, 11),
epsSU2(11, 12), epsUpDot(9, 13), qLc(13, 2, 12, -4)),
OpSum(O3lqqq(-1, -2, -3, -4))),
(Op(epsSU3(0, 1, 2), epsSU2(6, 7), qL(8, 0, 7, -2),
sigmaSU2(5, 6, 4), epsUp(8, 3), lL(3, 4, -1),
epsSU2(11, 12), qL(13, 2, 12, -4),
sigmaSU2(5, 11, 10), epsUp(13, 9), qL(9, 1, 10, -3)),
OpSum(O3lqqqc(-1, -2, -3, -4))),
# Standard Model dimension 6 operators other than four-fermion
# S type
(Op(D(0, phic(1)), D(0, phi(1)), phic(2), phi(2)),
OpSum(number_op(Fraction(1, 2)) * Ophisq,
-Op(mu2phi()) * Ophi4,
number_op(6) * Op(lambdaphi()) * Ophi,
number_op(Fraction(1, 2)) * Op(ye(0, 1)) * Oephi(0, 1),
number_op(Fraction(1, 2)) * Op(yd(0, 1)) * Odphi(0, 1),
number_op(Fraction(1, 2)) * Op(Vc(0, 1), yu(0, 2)) * Ouphi(1, 2),
number_op(Fraction(1, 2)) * Op(yec(0, 1)) * Oephic(0, 1),
number_op(Fraction(1, 2)) * Op(ydc(0, 1)) * Odphic(0, 1),
number_op(Fraction(1, 2)) * Op(V(0, 1), yuc(0, 2)) * Ouphic(1, 2))),
(Op(phic(0), phi(0), phic(1), phi(1), phic(2), phi(2)),
OpSum(number_op(3) * Ophi)),
(Op(phic(0), phi(0), phic(1), phic(1), phi(2), phi(2)),
OpSum(number_op(3) * Ophi)),
# SVF type
(Op(phic(0), D(1, phi(0)),
lLc(2, 3, -1), sigma4bar(1, 2, 4), lL(4, 3, -2)),
OpSum(- i_op * O1phil(-1, -2))),
(Op(D(1, phic(0)), phi(0),
lLc(2, 3, -2), sigma4bar(1, 2, 4), lL(4, 3, -1)),
OpSum(i_op * O1philc(-1, -2))),
(Op(phic(0), sigmaSU2(1, 0, 2), D(3, phi(2)),
lLc(4, 5, -1), sigma4bar(3, 4, 6),
sigmaSU2(1, 5, 7), lL(6, 7, -2)),
OpSum(- i_op * O3phil(-1, -2))),
(Op(D(3, phic(0)), sigmaSU2(1, 0, 2), phi(2),
lLc(4, 5, -2), sigma4bar(3, 4, 6),
sigmaSU2(1, 5, 7), lL(6, 7, -1)),
OpSum(i_op * O3philc(-1, -2))),
(Op(phic(0), D(1, phi(0)),
qLc(2, 3, 4, -1), sigma4bar(1, 2, 5), qL(5, 3, 4, -2)),
OpSum(- i_op * O1phiq(-1, -2))),
(Op(D(1, phic(0)), phi(0),
qLc(2, 3, 4, -2), sigma4bar(1, 2, 5), qL(5, 3, 4, -1)),
OpSum(i_op * O1phiqc(-1, -2))),
(Op(phic(0), sigmaSU2(1, 0, 2), D(3, phi(2)),
qLc(4, 5, 6, -1), sigma4bar(3, 4, 7),
sigmaSU2(1, 6, 8), qL(7, 5, 8, -2)),
OpSum(- i_op * O3phiq(-1, -2))),
(Op(D(3, phic(0)), sigmaSU2(1, 0, 2), phi(2),
qLc(4, 5, 6, -2), sigma4bar(3, 4, 7),
sigmaSU2(1, 6, 8), qL(7, 5, 8, -1)),
OpSum(i_op * O3phiqc(-1, -2))),
(Op(phic(0), D(1, phi(0)),
eRc(2, -1), sigma4(1, 2, 3), eR(3, -2)),
OpSum(- i_op * O1phie(-1, -2))),
(Op(D(1, phic(0)), phi(0),
eRc(2, -2), sigma4(1, 2, 3), eR(3, -1)),
OpSum(i_op * O1phiec(-1, -2))),
(Op(phic(0), D(1, phi(0)),
dRc(2, 3, -1), sigma4(1, 2, 4), dR(4, 3, -2)),
OpSum(- i_op * O1phid(-1, -2))),
(Op(D(1, phic(0)), phi(0),
dRc(2, 3, -2), sigma4(1, 2, 4), dR(4, 3, -1)),
OpSum(i_op * O1phidc(-1, -2))),
(Op(phic(0), D(1, phi(0)),
uRc(2, 3, -1), sigma4(1, 2, 4), uR(4, 3, -2)),
OpSum(- i_op * O1phiu(-1, -2))),
(Op(D(1, phic(0)), phi(0),
uRc(2, 3, -2), sigma4(1, 2, 4), uR(4, 3, -1)),
OpSum(i_op * O1phiuc(-1, -2))),
(Op(phi(0), epsSU2(0, 1), D(2, phi(1)),
uRc(3, 4, -1), sigma4(2, 3, 5), dR(5, 4, -2)),
OpSum(- i_op * Ophiud(-1, -2))),
(Op(phic(0), epsSU2(0, 1), D(2, phic(1)),
dRc(3, 4, -2), sigma4(2, 3, 5), uR(5, 4, -1)),
OpSum(i_op * Ophiudc(-1, -2))),
# STF type
(Op(lLc(0, 1, -1), sigmaTensor(2, 3, 0, 4), eR(4, -2),
phi(1), bFS(2, 3)),
OpSum(OeB(-1, -2))),
(Op(eRc(4, -2), sigmaTensor(2, 3, 4, 0), lL(0, 1, -1),
phic(1), bFS(2, 3)),
OpSum(OeBc(-1, -2))),
(Op(lLc(0, 1, -1), sigmaTensor(2, 3, 0, 4), eR(4, -2),
sigmaSU2(5, 1, 6), phi(6), wFS(2, 3, 5)),
OpSum(OeW(-1, -2))),
(Op(eRc(4, -2), sigmaTensor(2, 3, 4, 0), lL(0, 1, -1),
sigmaSU2(5, 6, 1), phic(6), wFS(2, 3, 5)),
OpSum(OeWc(-1, -2))),
(Op(qLc(0, 1, 2, -1), sigmaTensor(3, 4, 0, 5), uR(5, 1, -2),
epsSU2(2, 6), phic(6), bFS(3, 4)),
OpSum(OuB(-1, -2))),
(Op(uRc(5, 1, -2), sigmaTensor(3, 4, 5, 0), qL(0, 1, 2, -1),
epsSU2(2, 6), phi(6), bFS(3, 4)),
OpSum(OuBc(-1, -2))),
(Op(qLc(0, 1, 2, -1), sigmaTensor(3, 4, 0, 5), uR(5, 1, -2),
sigmaSU2(6, 2, 7), epsSU2(7, 8), phic(8), wFS(3, 4, 6)),
OpSum(OuW(-1, -2))),
(Op(uRc(5, 1, -2), sigmaTensor(3, 4, 5, 0), qL(0, 1, 2, -1),
sigmaSU2(6, 7, 2), epsSU2(7, 8), phi(8), wFS(3, 4, 6)),
OpSum(OuWc(-1, -2))),
(Op(qLc(0, 1, 2, -1), sigmaTensor(3, 4, 0, 5), dR(5, 1, -2),
phi(2), bFS(3, 4)),
OpSum(OdB(-1, -2))),
(Op(dRc(5, 1, -2), sigmaTensor(3, 4, 5, 0), qL(0, 1, 2, -1),
phic(2), bFS(3, 4)),
OpSum(OdBc(-1, -2))),
(Op(qLc(0, 1, 2, -1), sigmaTensor(3, 4, 0, 5), dR(5, 1, -2),
sigmaSU2(6, 2, 7), phi(7), wFS(3, 4, 6)),
OpSum(OdW(-1, -2))),
(Op(dRc(5, 1, -2), sigmaTensor(3, 4, 5, 0), qL(0, 1, 2, -1),
sigmaSU2(6, 7, 2), phic(7), wFS(3, 4, 6)),
OpSum(OdWc(-1, -2))),
(Op(qLc(0, 1, 2, -1), sigmaTensor(3, 4, 0, 5),
TSU3(6, 1, 7), uR(5, 7, -2),
epsSU2(2, 8), phic(8), gFS(3, 4, 6)),
OpSum(OuG(-1, -2))),
(Op(uRc(5, 7, -2), sigmaTensor(3, 4, 5, 0),
TSU3(6, 7, 1), qL(0, 1, 2, -1),
epsSU2(2, 8), phi(8), gFS(3, 4, 6)),
OpSum(OuGc(-1, -2))),
(Op(qLc(0, 1, 2, -1), sigmaTensor(3, 4, 0, 5),
TSU3(6, 1, 7), dR(5, 7, -2),
phi(2), gFS(3, 4, 6)),
OpSum(OdG(-1, -2))),
(Op(dRc(5, 1, -2), sigmaTensor(3, 4, 5, 0),
TSU3(6, 1, 7), qL(0, 7, 2, -1),
phic(2), gFS(3, 4, 6)),
OpSum(OdGc(-1, -2))),
# SF type
(Op(phic(0), phi(0), lLc(1, 2, -1), phi(2), eR(1, -2)),
OpSum(Oephi(-1, -2))),
(Op(phic(0), phi(0), eRc(1, -2), phic(2), lL(1, 2, -1)),
OpSum(Oephic(-1, -2))),
(Op(phic(0), phi(0), qLc(1, 2, 3, -1), phi(3), dR(1, 2, -2)),
OpSum(Odphi(-1, -2))),
(Op(phic(0), phi(0), dRc(1, 2, -2), phic(3), qL(1, 2, 3, -1)),
OpSum(Odphic(-1, -2))),
(Op(phic(0), phi(0), qLc(1, 2, 3, -1), epsSU2(3, 4),
phic(4), uR(1, 2, -2)),
OpSum(Ouphi(-1, -2))),
(Op(phic(0), phi(0), qLc(1, 2, 3, -1), epsSU2(4, 3),
phic(4), uR(1, 2, -2)),
OpSum(-Ouphi(-1, -2))),
(Op(phic(0), phi(0), uRc(1, 2, -2), qL(1, 2, 3, -1),
epsSU2(3, 4), phi(4)),
OpSum(Ouphic(-1, -2))),
(Op(phic(0), phi(0), uRc(1, 2, -2), qL(1, 2, 3, -1),
epsSU2(4, 3), phi(4)),
OpSum(-Ouphic(-1, -2))),
# Oblique type
(Op(phic(0), D(1, phi(0)), D(1, phic(2)), phi(2)),
OpSum(OphiD)),
(Op(phic(0), phi(0), bFS(1, 2), bFS(1, 2)),
OpSum(OphiB)),
(Op(phic(0), phi(0), eps4(1, 2, 3, 4), bFS(3, 4), bFS(1, 2)),
OpSum(OphiBTilde)),
(Op(phic(0), sigmaSU2(1, 0, 2), phi(2), wFS(3, 4, 1), bFS(3, 4)),
OpSum(OWB)),
(Op(phic(0), sigmaSU2(1, 0, 2), phi(2),
eps4(3, 4, 5, 6), wFS(5, 6, 1), bFS(3, 4)),
OpSum(OWBTilde)),
(Op(phic(0), phi(0), wFS(1, 2, 3), wFS(1, 2, 3)),
OpSum(OphiW)),
(Op(phic(0), phi(0), eps4(1, 2, 4, 5), wFS(4, 5, 3), wFS(1, 2, 3)),
OpSum(OphiWTilde)),
(Op(phic(0), phi(0), gFS(1, 2, 3), gFS(1, 2, 3)),
OpSum(OphiG)),
(Op(phic(0), phi(0), eps4(1, 2, 4, 5), gFS(4, 5, 3), gFS(1, 2, 3)),
OpSum(OphiGTilde)),
# Gauge type
(Op(epsSU2triplets(0, 1, 2),
wFS(3, 4, 0), wFS(4, 5, 1), wFS(5, 3, 2)),
OpSum(OW)),
(Op(epsSU2triplets(0, 1, 2),
eps4(3, 4, 6, 7), wFS(6, 7, 0), wFS(4, 5, 1), wFS(5, 3, 2)),
OpSum(OWTilde)),
(Op(fSU3(0, 1, 2),
gFS(3, 4, 0), gFS(4, 5, 1), gFS(5, 3, 2)),
OpSum(OG)),
(Op(fSU3(0, 1, 2),
eps4(3, 4, 6, 7), gFS(6, 7, 0), gFS(4, 5, 1), gFS(5, 3, 2)),
OpSum(OGTilde)),
# Standard Model dimension 5 operators
(Op(lL(0, 1, -1), epsSU2(1, 2), phi(2),
epsSU2(3, 4), phi(4), epsUp(5, 0), lL(5, 3, -2)),
OpSum(O5(-1, -2))),
(Op(lLc(0, 1, -2), epsSU2(1, 2), phic(2), epsUpDot(0, 3),
epsSU2(4, 5), phic(5), lLc(3, 4, -1)),
OpSum(O5c(-1, -2)))]
rules_basis_defs_dim_4 = [
# Standard Model dimension 4 operators
(Op(D(0, phic(1)), D(0, phi(1))),
OpSum(Okinphi)),
(Op(phic(0), phi(0), phic(1), phi(1)),
OpSum(Ophi4)),
(Op(phic(0), phi(0)),
OpSum(Ophi2)),
(Op(lLc(0, 1, -1), phi(1), eR(0, -2)),
OpSum(Oye(-1, -2))),
(Op(eRc(0, -2), phic(1), lL(0, 1, -1)),
OpSum(Oyec(-1, -2))),
(Op(qLc(0, 1, 2, -1), epsSU2(2, 3), phic(3), uR(0, 1, -2)),
OpSum(Oyu(-1, -2))),
(Op(uRc(0, 1, -2), epsSU2(2, 3), phi(3), qL(0, 1, 2, -1)),
OpSum(Oyuc(-1, -2))),
(Op(qLc(0, 1, 2, -1), phi(2), dR(0, 1, -2)),
OpSum(Oyd(-1, -2))),
(Op(dRc(0, 1, -2), phic(2), qL(0, 1, 2, -1)),
OpSum(Oydc(-1, -2)))]
rules_basis_definitions = rules_basis_defs_dim_6_5 + rules_basis_defs_dim_4
"""
Rules defining the operators in the basis in terms of
Standard Model fields.
"""
latex_basis_coefs = {
# Dimension 4
"Okinphi": r"\alpha_{{kin,\phi}}",
"Ophi4": r"\alpha_{{\phi 4}}",
"Ophi2": r"\alpha_{{\phi 2}}",
"Oye": r"\left(\alpha_{{{{y^e}}}}\right)_{{{}{}}}",
"Oyec": r"\left(\alpha_{{{{y^e}}}}\right)^*_{{{}{}}}",
"Oyd": r"\left(\alpha_{{{{y^d}}}}\right)_{{{}{}}}",
"Oydc": r"\left(\alpha_{{{{y^d}}}}\right)^*_{{{}{}}}",
"Oyu": r"\left(\alpha_{{{{y^u}}}}\right)_{{{}{}}}",
"Oyuc": r"\left(\alpha_{{{{y^u}}}}\right)^*_{{{}{}}}",
# Dimension 5
"O5": r"\frac{{\left(\alpha_5\right)_{{{}{}}}}}{{\Lambda}}",
"O5c": r"\frac{{\left(\alpha_5\right)^*_{{{}{}}}}}{{\Lambda}}",
# Auxiliary
"O5aux": r"\frac{{\left(\alpha^{{AUX}}_5\right)_{{{}{}}}}}{{\Lambda}}",
"O5auxc": r"\frac{{\left(\alpha^{{AUX}}_5\right)^*_{{{}{}}}}}{{\Lambda}}",
"Olqqqaux":
r"\frac{{\left(\alpha^{{AUX}}_{{lqqq}}\right)_{{{}{}{}{}}}}}{{\Lambda^2}}",
"Olqqqauxc":
r"\frac{{\left(\alpha^{{AUX}}_{{lqqq}}\right)^*_{{{}{}{}{}}}}}{{\Lambda^2}}",
# Dimension 6 four-fermion
# LLLL
"O1ll":
(r"\frac{{\left(\alpha^{{(1)}}_{{ll}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1qq":
(r"\frac{{\left(\alpha^{{(1)}}_{{qq}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O8qq":
(r"\frac{{\left(\alpha^{{(8)}}_{{qq}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1lq":
(r"\frac{{\left(\alpha^{{(1)}}_{{lq}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O3lq":
(r"\frac{{\left(\alpha^{{(3)}}_{{lq}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
# RRRR
"Oee":
(r"\frac{{\left(\alpha_{{ee}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1uu":
(r"\frac{{\left(\alpha^{{(1)}}_{{uu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1dd":
(r"\frac{{\left(\alpha^{{(1)}}_{{dd}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1ud":
(r"\frac{{\left(\alpha^{{(1)}}_{{ud}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O8ud":
(r"\frac{{\left(\alpha^{{(8)}}_{{ud}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oeu":
(r"\frac{{\left(\alpha_{{eu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oed":
(r"\frac{{\left(\alpha_{{ed}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
# LLRR and LRRL
"Ole":
(r"\frac{{\left(\alpha_{{le}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oqe":
(r"\frac{{\left(\alpha_{{qe}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Olu":
(r"\frac{{\left(\alpha_{{lu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Old":
(r"\frac{{\left(\alpha_{{ld}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1qu":
(r"\frac{{\left(\alpha^{{(1)}}_{{qu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O8qu":
(r"\frac{{\left(\alpha^{{(8)}}_{{qu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1qd":
(r"\frac{{\left(\alpha^{{(1)}}_{{qd}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O8qd":
(r"\frac{{\left(\alpha^{{(8)}}_{{qd}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oledq":
(r"\frac{{\left(\alpha_{{ledq}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oledqc":
(r"\frac{{\left(\alpha_{{ledq}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
# LRLR
"O1qud":
(r"\frac{{\left(\alpha^{{(1)}}_{{qud}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1qudc":
(r"\frac{{\left(\alpha^{{(1)}}_{{qud}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O8qud":
(r"\frac{{\left(\alpha^{{(8)}}_{{qud}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O8qudc":
(r"\frac{{\left(\alpha^{{(8)}}_{{qud}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Olequ":
(r"\frac{{\left(\alpha_{{lequ}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Olequc":
(r"\frac{{\left(\alpha_{{lequ}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oluqe":
(r"\frac{{\left(\alpha_{{luqe}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oluqec":
(r"\frac{{\left(\alpha_{{luqe}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
# \slashed{B} and \slashed{L} type
"Olqdu":
(r"\frac{{\left(\alpha_{{lqdu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Olqduc":
(r"\frac{{\left(\alpha_{{lqdu}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oqqeu":
(r"\frac{{\left(\alpha_{{qqeu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oqqeuc":
(r"\frac{{\left(\alpha_{{qqeu}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1lqqq":
(r"\frac{{\left(\alpha^{{(1)}}_{{lqqq}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O1lqqqc":
(r"\frac{{\left(\alpha^{{(1)}}_{{lqqq}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oudeu":
(r"\frac{{\left(\alpha_{{udeu}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"Oudeuc":
(r"\frac{{\left(\alpha_{{udeu}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O3lqqq":
(r"\frac{{\left(\alpha^{{(3)}}_{{lqqq}}\right)"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
"O3lqqqc":
(r"\frac{{\left(\alpha^{{(3)}}_{{lqqq}}\right)^*"
r"_{{{}{}{}{}}}}}{{\Lambda^2}}"),
# Dimension 6 other than four-fermion
# S type
"Ophi": r"\frac{{\alpha_\phi}}{{\Lambda^2}}",
"Ophisq": r"\frac{{\alpha_{{\phi\square}}}}{{\Lambda^2}}",
# SVF type
"O1phil":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi l}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1philc":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi l}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O3phil":
(r"\frac{{\left(\alpha^{{(3)}}_{{\phi l}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O3philc":
(r"\frac{{\left(\alpha^{{(3)}}_{{\phi l}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phiq":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi q}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phiqc":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi q}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O3phiq":
(r"\frac{{\left(\alpha^{{(3)}}_{{\phi q}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O3phiqc":
(r"\frac{{\left(\alpha^{{(3)}}_{{\phi q}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phie":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi e}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phiec":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi e}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phid":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi d}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phidc":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi d}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phiu":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi u}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"O1phiuc":
(r"\frac{{\left(\alpha^{{(1)}}_{{\phi u}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"Ophiud":
(r"\frac{{\left(\alpha_{{\phi ud}}\right)"
r"_{{{}{}}}}}{{\Lambda^2}}"),
"Ophiudc":
(r"\frac{{\left(\alpha_{{\phi ud}}\right)^*"
r"_{{{}{}}}}}{{\Lambda^2}}"),
# STF type
"OeB": r"\frac{{\left(\alpha_{{eB}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OeBc": r"\frac{{\left(\alpha_{{eB}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"OeW": r"\frac{{\left(\alpha_{{eW}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OeWc": r"\frac{{\left(\alpha_{{eW}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"OuB": r"\frac{{\left(\alpha_{{uB}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OuBc": r"\frac{{\left(\alpha_{{uB}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"OuW": r"\frac{{\left(\alpha_{{uW}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OuWc": r"\frac{{\left(\alpha_{{uW}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"OdB": r"\frac{{\left(\alpha_{{dB}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OdBc": r"\frac{{\left(\alpha_{{dB}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"OdW": r"\frac{{\left(\alpha_{{dW}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OdWc": r"\frac{{\left(\alpha_{{dW}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"OuG": r"\frac{{\left(\alpha_{{uG}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OuGc": r"\frac{{\left(\alpha_{{uG}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"OdG": r"\frac{{\left(\alpha_{{dG}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"OdGc": r"\frac{{\left(\alpha_{{dG}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
# SF type
"Oephi":
r"\frac{{\left(\alpha_{{e\phi}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"Oephic":
r"\frac{{\left(\alpha_{{e\phi}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"Odphi":
r"\frac{{\left(\alpha_{{d\phi}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"Odphic":
r"\frac{{\left(\alpha_{{d\phi}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
"Ouphi":
r"\frac{{\left(\alpha_{{u\phi}}\right)_{{{}{}}}}}{{\Lambda^2}}",
"Ouphic":
r"\frac{{\left(\alpha_{{u\phi}}\right)^*_{{{}{}}}}}{{\Lambda^2}}",
# Oblique type
"OphiD": r"\frac{{\alpha_{{\phi D}}}}{{\Lambda^2}}",
"OphiB": r"\frac{{\alpha_{{\phi B}}}}{{\Lambda^2}}",
"OWB": r"\frac{{\alpha_{{WB}}}}{{\Lambda^2}}",
"OphiW": r"\frac{{\alpha_{{\phi W}}}}{{\Lambda^2}}",
"OphiBTilde": r"\frac{{\alpha_{{\phi\tilde{{B}}}}}}{{\Lambda^2}}",
"OWBTilde": r"\frac{{\alpha_{{W\tilde{{B}}}}}}{{\Lambda^2}}",
"OphiWTilde": r"\frac{{\alpha_{{\phi\tilde{{W}}}}}}{{\Lambda^2}}",
"OphiG": r"\frac{{\alpha_{{\phi G}}}}{{\Lambda^2}}",
"OphiGTilde": r"\frac{{\alpha_{{\phi\tilde{{G}}}}}}{{\Lambda^2}}",
# Gauge type
"OW": r"\frac{{\alpha_W}}{{\Lambda^2}}",
"OWTilde": r"\frac{{\alpha_{{\tilde{{W}}}}}}{{\Lambda^2}}",
"OG": r"\frac{{\alpha_G}}{{\Lambda^2}}",
"OGTilde": r"\frac{{\alpha_{{\tilde{{G}}}}}}{{\Lambda^2}}"}
"""
LaTeX representation of the coefficients of the
operators in the basis.
"""
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Functional workflow
===================
.. image:: _static/functional_workflow_source.svg
The functional workflow performs the following steps:
#. Sanitize (revise data types and xforms) input data, read
associated metadata and discard non-steady state frames.
#. :abbr:`HMC (head-motion correction)` based on ``3dvolreg`` from
AFNI -- :py:func:`hmc`.
#. Skull-stripping of the time-series (AFNI) --
:py:func:`fmri_bmsk_workflow`.
#. Calculate mean time-series, and :abbr:`tSNR (temporal SNR)`.
#. Spatial Normalization to MNI (ANTs) -- :py:func:`epi_mni_align`
#. Extraction of IQMs -- :py:func:`compute_iqms`.
#. Individual-reports generation -- :py:func:`individual_reports`.
This workflow is orchestrated by :py:func:`fmri_qc_workflow`.
"""
from mriqc import config
from nipype.interfaces import io as nio
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
def fmri_qc_workflow(name="funcMRIQC"):
"""
Initialize the (f)MRIQC workflow.
.. workflow::
import os.path as op
from mriqc.workflows.functional import fmri_qc_workflow
from mriqc.testing import mock_config
with mock_config():
wf = fmri_qc_workflow()
"""
from nipype.algorithms.confounds import TSNR, NonSteadyStateDetector
from nipype.interfaces.afni import TStat
from niworkflows.interfaces.header import SanitizeImage
workflow = pe.Workflow(name=name)
mem_gb = config.workflow.biggest_file_gb
dataset = config.workflow.inputs.get("bold", [])
config.loggers.workflow.info(
f"""\
Building functional MRIQC workflow for files: {", ".join(dataset)}."""
)
# Define workflow, inputs and outputs
# 0. Get data, put it in RAS orientation
inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
inputnode.iterables = [("in_file", dataset)]
outputnode = pe.Node(
niu.IdentityInterface(
fields=["qc", "mosaic", "out_group", "out_dvars", "out_fd"]
),
name="outputnode",
)
non_steady_state_detector = pe.Node(
NonSteadyStateDetector(), name="non_steady_state_detector"
)
sanitize = pe.Node(SanitizeImage(), name="sanitize", mem_gb=mem_gb * 4.0)
sanitize.inputs.max_32bit = config.execution.float32
# Workflow --------------------------------------------------------
# 1. HMC: head motion correct
hmcwf = hmc()
# Set HMC settings
hmcwf.inputs.inputnode.fd_radius = config.workflow.fd_radius
# 2. Compute mean fmri
mean = pe.Node(
TStat(options="-mean", outputtype="NIFTI_GZ"),
name="mean",
mem_gb=mem_gb * 1.5,
)
skullstrip_epi = fmri_bmsk_workflow()
# EPI to MNI registration
ema = epi_mni_align()
# Compute TSNR using nipype implementation
tsnr = pe.Node(TSNR(), name="compute_tsnr", mem_gb=mem_gb * 2.5)
# 7. Compute IQMs
iqmswf = compute_iqms()
# Reports
repwf = individual_reports()
# fmt: off
workflow.connect([
(inputnode, iqmswf, [("in_file", "inputnode.in_file")]),
(inputnode, sanitize, [("in_file", "in_file")]),
(inputnode, non_steady_state_detector, [("in_file", "in_file")]),
(non_steady_state_detector, sanitize, [("n_volumes_to_discard", "n_volumes_to_discard")]),
(sanitize, hmcwf, [("out_file", "inputnode.in_file")]),
(mean, skullstrip_epi, [("out_file", "inputnode.in_file")]),
(hmcwf, mean, [("outputnode.out_file", "in_file")]),
(hmcwf, tsnr, [("outputnode.out_file", "in_file")]),
(mean, ema, [("out_file", "inputnode.epi_mean")]),
(skullstrip_epi, ema, [("outputnode.out_file", "inputnode.epi_mask")]),
(sanitize, iqmswf, [("out_file", "inputnode.in_ras")]),
(mean, iqmswf, [("out_file", "inputnode.epi_mean")]),
(hmcwf, iqmswf, [("outputnode.out_file", "inputnode.hmc_epi"),
("outputnode.out_fd", "inputnode.hmc_fd")]),
(skullstrip_epi, iqmswf, [("outputnode.out_file", "inputnode.brainmask")]),
(tsnr, iqmswf, [("tsnr_file", "inputnode.in_tsnr")]),
(sanitize, repwf, [("out_file", "inputnode.in_ras")]),
(mean, repwf, [("out_file", "inputnode.epi_mean")]),
(tsnr, repwf, [("stddev_file", "inputnode.in_stddev")]),
(skullstrip_epi, repwf, [("outputnode.out_file", "inputnode.brainmask")]),
(hmcwf, repwf, [("outputnode.out_fd", "inputnode.hmc_fd"),
("outputnode.out_file", "inputnode.hmc_epi")]),
(ema, repwf, [("outputnode.epi_parc", "inputnode.epi_parc"),
("outputnode.report", "inputnode.mni_report")]),
(non_steady_state_detector, iqmswf, [("n_volumes_to_discard", "inputnode.exclude_index")]),
(iqmswf, repwf, [("outputnode.out_file", "inputnode.in_iqms"),
("outputnode.out_dvars", "inputnode.in_dvars"),
("outputnode.outliers", "inputnode.outliers")]),
(hmcwf, outputnode, [("outputnode.out_fd", "out_fd")]),
])
# fmt: on
if config.workflow.fft_spikes_detector:
# fmt: off
workflow.connect([
(iqmswf, repwf, [("outputnode.out_spikes", "inputnode.in_spikes"),
("outputnode.out_fft", "inputnode.in_fft")]),
])
# fmt: on
if config.workflow.ica:
from niworkflows.interfaces.reportlets.segmentation import MELODICRPT
melodic = pe.Node(
MELODICRPT(
no_bet=True,
no_mask=True,
no_mm=True,
compress_report=False,
generate_report=True,
),
name="ICA",
mem_gb=max(mem_gb * 5, 8),
)
# fmt: off
workflow.connect([
(sanitize, melodic, [("out_file", "in_files")]),
(skullstrip_epi, melodic, [("outputnode.out_file", "report_mask")]),
(melodic, repwf, [("out_report", "inputnode.ica_report")])
])
# fmt: on
# Upload metrics
if not config.execution.no_sub:
from mriqc.interfaces.webapi import UploadIQMs
upldwf = pe.Node(UploadIQMs(), name="UploadMetrics")
upldwf.inputs.url = config.execution.webapi_url
upldwf.inputs.strict = config.execution.upload_strict
if config.execution.webapi_port:
upldwf.inputs.port = config.execution.webapi_port
# fmt: off
workflow.connect([
(iqmswf, upldwf, [("outputnode.out_file", "in_iqms")]),
])
# fmt: on
return workflow
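# Hedged usage sketch, mirroring the docstring above: build the workflow under
# a mocked configuration.
#
#     from mriqc.testing import mock_config
#     from mriqc.workflows.functional import fmri_qc_workflow
#     with mock_config():
#         wf = fmri_qc_workflow()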
def compute_iqms(name="ComputeIQMs"):
"""
Initialize the workflow that actually computes the IQMs.
.. workflow::
from mriqc.workflows.functional import compute_iqms
from mriqc.testing import mock_config
with mock_config():
wf = compute_iqms()
"""
from nipype.algorithms.confounds import ComputeDVARS
from nipype.interfaces.afni import OutlierCount, QualityIndex
from niworkflows.interfaces.bids import ReadSidecarJSON
from mriqc.interfaces import FunctionalQC, IQMFileSink
from mriqc.interfaces.reports import AddProvenance
from mriqc.interfaces.transitional import GCOR
from mriqc.workflows.utils import _tofloat, get_fwhmx
mem_gb = config.workflow.biggest_file_gb
workflow = pe.Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
"in_file",
"in_ras",
"epi_mean",
"brainmask",
"hmc_epi",
"hmc_fd",
"fd_thres",
"in_tsnr",
"metadata",
"exclude_index",
]
),
name="inputnode",
)
outputnode = pe.Node(
niu.IdentityInterface(
fields=[
"out_file",
"out_dvars",
"outliers",
"out_spikes",
"out_fft",
]
),
name="outputnode",
)
# Set FD threshold
inputnode.inputs.fd_thres = config.workflow.fd_thres
# Compute DVARS
dvnode = pe.Node(
ComputeDVARS(save_plot=False, save_all=True),
name="ComputeDVARS",
mem_gb=mem_gb * 3,
)
# AFNI quality measures
fwhm_interface = get_fwhmx()
fwhm = pe.Node(fwhm_interface, name="smoothness")
# fwhm.inputs.acf = True # add when AFNI >= 16
outliers = pe.Node(
OutlierCount(fraction=True, out_file="outliers.out"),
name="outliers",
mem_gb=mem_gb * 2.5,
)
    quality = pe.Node(
        QualityIndex(automask=True, out_file="quality.out"),
        name="quality",
        mem_gb=mem_gb * 3,
    )
gcor = pe.Node(GCOR(), name="gcor", mem_gb=mem_gb * 2)
measures = pe.Node(FunctionalQC(), name="measures", mem_gb=mem_gb * 3)
# fmt: off
workflow.connect([
(inputnode, dvnode, [("hmc_epi", "in_file"),
("brainmask", "in_mask")]),
(inputnode, measures, [("epi_mean", "in_epi"),
("brainmask", "in_mask"),
("hmc_epi", "in_hmc"),
("hmc_fd", "in_fd"),
("fd_thres", "fd_thres"),
("in_tsnr", "in_tsnr")]),
(inputnode, fwhm, [("epi_mean", "in_file"),
("brainmask", "mask")]),
(inputnode, quality, [("hmc_epi", "in_file")]),
(inputnode, outliers, [("hmc_epi", "in_file"),
("brainmask", "mask")]),
(inputnode, gcor, [("hmc_epi", "in_file"),
("brainmask", "mask")]),
(dvnode, measures, [("out_all", "in_dvars")]),
(fwhm, measures, [(("fwhm", _tofloat), "in_fwhm")]),
(dvnode, outputnode, [("out_all", "out_dvars")]),
(outliers, outputnode, [("out_file", "outliers")])
])
# fmt: on
# Add metadata
meta = pe.Node(ReadSidecarJSON(), name="metadata", run_without_submitting=True)
addprov = pe.Node(
AddProvenance(modality="bold"),
name="provenance",
run_without_submitting=True,
)
# Save to JSON file
datasink = pe.Node(
IQMFileSink(
modality="bold",
out_dir=str(config.execution.output_dir),
dataset=config.execution.dsname,
),
name="datasink",
run_without_submitting=True,
)
# fmt: off
workflow.connect([
(inputnode, datasink, [("in_file", "in_file"),
("exclude_index", "dummy_trs")]),
(inputnode, meta, [("in_file", "in_file")]),
(inputnode, addprov, [("in_file", "in_file")]),
(meta, datasink, [("subject", "subject_id"),
("session", "session_id"),
("task", "task_id"),
("acquisition", "acq_id"),
("reconstruction", "rec_id"),
("run", "run_id"),
("out_dict", "metadata")]),
(addprov, datasink, [("out_prov", "provenance")]),
(outliers, datasink, [(("out_file", _parse_tout), "aor")]),
(gcor, datasink, [(("out", _tofloat), "gcor")]),
(quality, datasink, [(("out_file", _parse_tqual), "aqi")]),
(measures, datasink, [("out_qc", "root")]),
(datasink, outputnode, [("out_file", "out_file")])
])
# fmt: on
# FFT spikes finder
if config.workflow.fft_spikes_detector:
from .utils import slice_wise_fft
spikes_fft = pe.Node(
niu.Function(
input_names=["in_file"],
output_names=["n_spikes", "out_spikes", "out_fft"],
function=slice_wise_fft,
),
name="SpikesFinderFFT",
)
# fmt: off
workflow.connect([
(inputnode, spikes_fft, [("in_ras", "in_file")]),
(spikes_fft, outputnode, [("out_spikes", "out_spikes"),
("out_fft", "out_fft")]),
(spikes_fft, datasink, [("n_spikes", "spikes_num")])
])
# fmt: on
return workflow
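# --- Hedged example (not part of MRIQC) --------------------------------------
# A minimal sketch of building the IQMs workflow on its own for inspection,
# following the usage shown in the docstring above. The scratch directory is
# hypothetical and writing the graph requires graphviz to be available.
def _debug_compute_iqms():
    from mriqc.testing import mock_config
    with mock_config():
        wf = compute_iqms()
        wf.base_dir = "/tmp/mriqc-debug"  # hypothetical scratch directory
        wf.write_graph(graph2use="colored", format="svg")
        return wf.list_node_names()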
def individual_reports(name="ReportsWorkflow"):
"""
Write out individual reportlets.
.. workflow::
from mriqc.workflows.functional import individual_reports
from mriqc.testing import mock_config
with mock_config():
wf = individual_reports()
"""
from niworkflows.interfaces.plotting import FMRISummary
from mriqc.interfaces import PlotMosaic, PlotSpikes, Spikes
from mriqc.interfaces.reports import IndividualReport
verbose = config.execution.verbose_reports
mem_gb = config.workflow.biggest_file_gb
pages = 5
extra_pages = int(verbose) * 4
workflow = pe.Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
"in_iqms",
"in_ras",
"hmc_epi",
"epi_mean",
"brainmask",
"hmc_fd",
"fd_thres",
"epi_parc",
"in_dvars",
"in_stddev",
"outliers",
"in_spikes",
"in_fft",
"mni_report",
"ica_report",
]
),
name="inputnode",
)
# Set FD threshold
inputnode.inputs.fd_thres = config.workflow.fd_thres
spmask = pe.Node(
niu.Function(
input_names=["in_file", "in_mask"],
output_names=["out_file", "out_plot"],
function=spikes_mask,
),
name="SpikesMask",
mem_gb=mem_gb * 3.5,
)
spikes_bg = pe.Node(
Spikes(no_zscore=True, detrend=False),
name="SpikesFinderBgMask",
mem_gb=mem_gb * 2.5,
)
bigplot = pe.Node(FMRISummary(), name="BigPlot", mem_gb=mem_gb * 3.5)
# fmt: off
workflow.connect([
(inputnode, spikes_bg, [("in_ras", "in_file")]),
(inputnode, spmask, [("in_ras", "in_file")]),
(inputnode, bigplot, [("hmc_epi", "in_func"),
("brainmask", "in_mask"),
("hmc_fd", "fd"),
("fd_thres", "fd_thres"),
("in_dvars", "dvars"),
("epi_parc", "in_segm"),
("outliers", "outliers")]),
(spikes_bg, bigplot, [("out_tsz", "in_spikes_bg")]),
(spmask, spikes_bg, [("out_file", "in_mask")]),
])
# fmt: on
mosaic_mean = pe.Node(
PlotMosaic(out_file="plot_func_mean_mosaic1.svg", cmap="Greys_r"),
name="PlotMosaicMean",
)
mosaic_stddev = pe.Node(
PlotMosaic(out_file="plot_func_stddev_mosaic2_stddev.svg", cmap="viridis"),
name="PlotMosaicSD",
)
mplots = pe.Node(
niu.Merge(
pages
+ extra_pages
+ int(config.workflow.fft_spikes_detector)
+ int(config.workflow.ica)
),
name="MergePlots",
)
rnode = pe.Node(IndividualReport(), name="GenerateReport")
# Link images that should be reported
dsplots = pe.Node(
nio.DataSink(
base_directory=str(config.execution.output_dir),
parameterization=False,
),
name="dsplots",
run_without_submitting=True,
)
# fmt: off
workflow.connect([
(inputnode, rnode, [("in_iqms", "in_iqms")]),
(inputnode, mosaic_mean, [("epi_mean", "in_file")]),
(inputnode, mosaic_stddev, [("in_stddev", "in_file")]),
(mosaic_mean, mplots, [("out_file", "in1")]),
(mosaic_stddev, mplots, [("out_file", "in2")]),
(bigplot, mplots, [("out_file", "in3")]),
(mplots, rnode, [("out", "in_plots")]),
(rnode, dsplots, [("out_file", "@html_report")]),
])
# fmt: on
if config.workflow.fft_spikes_detector:
mosaic_spikes = pe.Node(
PlotSpikes(
out_file="plot_spikes.svg",
cmap="viridis",
title="High-Frequency spikes",
),
name="PlotSpikes",
)
# fmt: off
workflow.connect([
(inputnode, mosaic_spikes, [("in_ras", "in_file"),
("in_spikes", "in_spikes"),
("in_fft", "in_fft")]),
(mosaic_spikes, mplots, [("out_file", "in4")])
])
# fmt: on
if config.workflow.ica:
page_number = 4 + config.workflow.fft_spikes_detector
# fmt: off
workflow.connect([
(inputnode, mplots, [("ica_report", "in%d" % page_number)])
])
# fmt: on
if not verbose:
return workflow
mosaic_zoom = pe.Node(
PlotMosaic(out_file="plot_anat_mosaic1_zoomed.svg", cmap="Greys_r"),
name="PlotMosaicZoomed",
)
mosaic_noise = pe.Node(
PlotMosaic(
out_file="plot_anat_mosaic2_noise.svg",
only_noise=True,
cmap="viridis_r",
),
name="PlotMosaicNoise",
)
# Verbose-reporting goes here
from ..interfaces.viz import PlotContours
plot_bmask = pe.Node(
PlotContours(
display_mode="z",
levels=[0.5],
colors=["r"],
cut_coords=10,
out_file="bmask",
),
name="PlotBrainmask",
)
# fmt: off
workflow.connect([
(inputnode, plot_bmask, [("epi_mean", "in_file"),
("brainmask", "in_contours")]),
(inputnode, mosaic_zoom, [("epi_mean", "in_file"),
("brainmask", "bbox_mask_file")]),
(inputnode, mosaic_noise, [("epi_mean", "in_file")]),
(mosaic_zoom, mplots, [("out_file", "in%d" % (pages + 1))]),
(mosaic_noise, mplots, [("out_file", "in%d" % (pages + 2))]),
(plot_bmask, mplots, [("out_file", "in%d" % (pages + 3))]),
(inputnode, mplots, [("mni_report", "in%d" % (pages + 4))]),
])
# fmt: on
return workflow
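# --- Hedged example (not part of MRIQC) --------------------------------------
# A minimal sketch: the number of reportlets merged above depends on the
# ``verbose_reports``, ``fft_spikes_detector`` and ``ica`` settings. The sized
# Merge node can be retrieved for inspection under the ``mock_config`` helper
# used in the docstrings.
def _debug_report_pages():
    from mriqc.testing import mock_config
    with mock_config():
        wf = individual_reports()
        # The "MergePlots" node was sized as
        # pages + extra_pages + fft_spikes_detector + ica (see above).
        return wf.get_node("MergePlots")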
def fmri_bmsk_workflow(name="fMRIBrainMask"):
"""
Compute a brain mask for the input :abbr:`fMRI (functional MRI)` dataset.
.. workflow::
from mriqc.workflows.functional import fmri_bmsk_workflow
from mriqc.testing import mock_config
with mock_config():
wf = fmri_bmsk_workflow()
"""
from nipype.interfaces.afni import Automask
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
outputnode = pe.Node(niu.IdentityInterface(fields=["out_file"]), name="outputnode")
afni_msk = pe.Node(Automask(outputtype="NIFTI_GZ"), name="afni_msk")
# Connect brain mask extraction
# fmt: off
workflow.connect([
(inputnode, afni_msk, [("in_file", "in_file")]),
(afni_msk, outputnode, [("out_file", "out_file")])
])
# fmt: on
return workflow
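# --- Hedged example (not part of MRIQC) --------------------------------------
# A minimal sketch of running the brain-mask workflow on its own (requires
# AFNI's ``3dAutomask``); the input path and scratch directory are hypothetical.
def _run_fmri_bmsk_example():
    wf = fmri_bmsk_workflow()
    wf.base_dir = "/tmp/mriqc-bmsk"  # hypothetical scratch directory
    wf.inputs.inputnode.in_file = "sub-01_task-rest_bold.nii.gz"  # hypothetical
    return wf.run()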
def hmc(name="fMRI_HMC"):
"""
Create a :abbr:`HMC (head motion correction)` workflow for fMRI.
.. workflow::
from mriqc.workflows.functional import hmc
from mriqc.testing import mock_config
with mock_config():
wf = hmc()
"""
from nipype.algorithms.confounds import FramewiseDisplacement
from nipype.interfaces.afni import Calc, Despike, Refit, TShift, Volreg
mem_gb = config.workflow.biggest_file_gb
workflow = pe.Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(fields=["in_file", "fd_radius", "start_idx", "stop_idx"]),
name="inputnode",
)
outputnode = pe.Node(
niu.IdentityInterface(fields=["out_file", "out_fd"]), name="outputnode"
)
if any(
(
config.workflow.start_idx is not None,
config.workflow.stop_idx is not None,
)
):
drop_trs = pe.Node(Calc(expr="a", outputtype="NIFTI_GZ"), name="drop_trs")
# fmt: off
workflow.connect([
(inputnode, drop_trs, [("in_file", "in_file_a"),
("start_idx", "start_idx"),
("stop_idx", "stop_idx")]),
])
# fmt: on
else:
drop_trs = pe.Node(niu.IdentityInterface(fields=["out_file"]), name="drop_trs")
# fmt: off
workflow.connect([
(inputnode, drop_trs, [("in_file", "out_file")]),
])
# fmt: on
# calculate hmc parameters
hmc = pe.Node(
Volreg(args="-Fourier -twopass", zpad=4, outputtype="NIFTI_GZ"),
name="motion_correct",
mem_gb=mem_gb * 2.5,
)
# Compute the frame-wise displacement
fdnode = pe.Node(
FramewiseDisplacement(normalize=False, parameter_source="AFNI"),
name="ComputeFD",
)
# fmt: off
workflow.connect([
(inputnode, fdnode, [("fd_radius", "radius")]),
(hmc, outputnode, [("out_file", "out_file")]),
(hmc, fdnode, [("oned_file", "in_file")]),
(fdnode, outputnode, [("out_file", "out_fd")]),
])
# fmt: on
# Slice timing correction, despiking, and deoblique
st_corr = pe.Node(TShift(outputtype="NIFTI_GZ"), name="TimeShifts")
deoblique_node = pe.Node(Refit(deoblique=True), name="deoblique")
despike_node = pe.Node(Despike(outputtype="NIFTI_GZ"), name="despike")
if all(
(
config.workflow.correct_slice_timing,
config.workflow.despike,
config.workflow.deoblique,
)
):
# fmt: off
workflow.connect([
(drop_trs, st_corr, [("out_file", "in_file")]),
(st_corr, despike_node, [("out_file", "in_file")]),
(despike_node, deoblique_node, [("out_file", "in_file")]),
(deoblique_node, hmc, [("out_file", "in_file")]),
])
# fmt: on
elif config.workflow.correct_slice_timing and config.workflow.despike:
# fmt: off
workflow.connect([
(drop_trs, st_corr, [("out_file", "in_file")]),
(st_corr, despike_node, [("out_file", "in_file")]),
(despike_node, hmc, [("out_file", "in_file")]),
])
# fmt: on
elif config.workflow.correct_slice_timing and config.workflow.deoblique:
# fmt: off
workflow.connect([
(drop_trs, st_corr, [("out_file", "in_file")]),
(st_corr, deoblique_node, [("out_file", "in_file")]),
(deoblique_node, hmc, [("out_file", "in_file")]),
])
# fmt: on
elif config.workflow.correct_slice_timing:
# fmt: off
workflow.connect([
(drop_trs, st_corr, [("out_file", "in_file")]),
(st_corr, hmc, [("out_file", "in_file")]),
])
# fmt: on
elif config.workflow.despike and config.workflow.deoblique:
# fmt: off
workflow.connect([
(drop_trs, despike_node, [("out_file", "in_file")]),
(despike_node, deoblique_node, [("out_file", "in_file")]),
(deoblique_node, hmc, [("out_file", "in_file")]),
])
# fmt: on
elif config.workflow.despike:
# fmt: off
workflow.connect([
(drop_trs, despike_node, [("out_file", "in_file")]),
(despike_node, hmc, [("out_file", "in_file")]),
])
# fmt: on
elif config.workflow.deoblique:
# fmt: off
workflow.connect([
(drop_trs, deoblique_node, [("out_file", "in_file")]),
(deoblique_node, hmc, [("out_file", "in_file")]),
])
# fmt: on
else:
# fmt: off
workflow.connect([
(drop_trs, hmc, [("out_file", "in_file")]),
])
# fmt: on
return workflow
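# --- Hedged example (not part of MRIQC) --------------------------------------
# A minimal sketch showing that the wiring of ``hmc()`` depends on the
# ``correct_slice_timing`` / ``despike`` / ``deoblique`` configuration flags:
# only the optional nodes that were actually connected above end up in the
# built graph. Assumes the ``mock_config`` helper used in the docstrings.
def _debug_hmc_wiring():
    from mriqc.testing import mock_config
    with mock_config():
        wf = hmc()
        # "TimeShifts"/"despike"/"deoblique" only appear here when the
        # corresponding config.workflow flags are enabled.
        return wf.list_node_names()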
def epi_mni_align(name="SpatialNormalization"):
"""
Estimate the transform that maps the EPI space into MNI152NLin2009cAsym.
    The input epi_mean is the averaged and brain-masked EPI timeseries.
    Returns the EPI mean resampled in MNI space (to check the registration) and
    the associated "lobe" parcellation in EPI space.
.. workflow::
from mriqc.workflows.functional import epi_mni_align
from mriqc.testing import mock_config
with mock_config():
wf = epi_mni_align()
"""
from nipype.interfaces.ants import ApplyTransforms, N4BiasFieldCorrection
from niworkflows.interfaces.reportlets.registration import (
SpatialNormalizationRPT as RobustMNINormalization,
)
from templateflow.api import get as get_template
# Get settings
testing = config.execution.debug
n_procs = config.nipype.nprocs
ants_nthreads = config.nipype.omp_nthreads
workflow = pe.Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(fields=["epi_mean", "epi_mask"]),
name="inputnode",
)
outputnode = pe.Node(
niu.IdentityInterface(fields=["epi_mni", "epi_parc", "report"]),
name="outputnode",
)
n4itk = pe.Node(
N4BiasFieldCorrection(dimension=3, copy_header=True), name="SharpenEPI"
)
norm = pe.Node(
RobustMNINormalization(
explicit_masking=False,
flavor="testing" if testing else "precise",
float=config.execution.ants_float,
generate_report=True,
moving="boldref",
num_threads=ants_nthreads,
reference="boldref",
reference_image=str(
get_template("MNI152NLin2009cAsym", resolution=2, suffix="boldref")
),
reference_mask=str(
get_template(
"MNI152NLin2009cAsym",
resolution=2,
desc="brain",
suffix="mask",
)
),
template="MNI152NLin2009cAsym",
template_resolution=2,
),
name="EPI2MNI",
num_threads=n_procs,
mem_gb=3,
)
# Warp segmentation into EPI space
invt = pe.Node(
ApplyTransforms(
float=True,
input_image=str(
get_template(
"MNI152NLin2009cAsym",
resolution=1,
desc="carpet",
suffix="dseg",
)
),
dimension=3,
default_value=0,
interpolation="MultiLabel",
),
name="ResampleSegmentation",
)
# fmt: off
workflow.connect([
(inputnode, invt, [("epi_mean", "reference_image")]),
(inputnode, n4itk, [("epi_mean", "input_image")]),
(inputnode, norm, [("epi_mask", "moving_mask")]),
(n4itk, norm, [("output_image", "moving_image")]),
        (norm, invt, [("inverse_composite_transform", "transforms")]),
(invt, outputnode, [("output_image", "epi_parc")]),
(norm, outputnode, [("warped_image", "epi_mni"),
("out_report", "report")]),
])
# fmt: on
return workflow
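# --- Hedged example (not part of MRIQC) --------------------------------------
# A minimal sketch of fetching the same MNI152NLin2009cAsym references used by
# the registration and segmentation nodes above via TemplateFlow (the files are
# downloaded on first use).
def _fetch_mni_references():
    from templateflow.api import get as get_template
    boldref = get_template("MNI152NLin2009cAsym", resolution=2, suffix="boldref")
    brainmask = get_template(
        "MNI152NLin2009cAsym", resolution=2, desc="brain", suffix="mask"
    )
    carpet_dseg = get_template(
        "MNI152NLin2009cAsym", resolution=1, desc="carpet", suffix="dseg"
    )
    return boldref, brainmask, carpet_dseg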
def spikes_mask(in_file, in_mask=None, out_file=None):
"""Calculate a mask in which check for :abbr:`EM (electromagnetic)` spikes."""
import os.path as op
import nibabel as nb
import numpy as np
from nilearn.image import mean_img
from nilearn.plotting import plot_roi
from scipy import ndimage as nd
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("{}_spmask{}".format(fname, ext))
out_plot = op.abspath("{}_spmask.pdf".format(fname))
in_4d_nii = nb.load(in_file)
orientation = nb.aff2axcodes(in_4d_nii.affine)
if in_mask:
        mask_data = np.asanyarray(nb.load(in_mask).dataobj)  # ``get_data()`` is deprecated in nibabel
a = np.where(mask_data != 0)
bbox = (
np.max(a[0]) - np.min(a[0]),
np.max(a[1]) - np.min(a[1]),
np.max(a[2]) - np.min(a[2]),
)
longest_axis = np.argmax(bbox)
# Input here is a binarized and intersected mask data from previous section
dil_mask = nd.binary_dilation(
mask_data, iterations=int(mask_data.shape[longest_axis] / 9)
)
rep = list(mask_data.shape)
rep[longest_axis] = -1
new_mask_2d = dil_mask.max(axis=longest_axis).reshape(rep)
rep = [1, 1, 1]
rep[longest_axis] = mask_data.shape[longest_axis]
new_mask_3d = np.logical_not(np.tile(new_mask_2d, rep))
else:
new_mask_3d = np.zeros(in_4d_nii.shape[:3]) == 1
if orientation[0] in ["L", "R"]:
new_mask_3d[0:2, :, :] = True
new_mask_3d[-3:-1, :, :] = True
else:
new_mask_3d[:, 0:2, :] = True
new_mask_3d[:, -3:-1, :] = True
mask_nii = nb.Nifti1Image(
new_mask_3d.astype(np.uint8), in_4d_nii.affine, in_4d_nii.header
)
mask_nii.to_filename(out_file)
plot_roi(mask_nii, mean_img(in_4d_nii), output_file=out_plot)
return out_file, out_plot
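# --- Hedged example (not part of MRIQC) --------------------------------------
# A minimal sketch (hypothetical file names) of calling ``spikes_mask``
# directly, outside the report workflow, to obtain the background mask and its
# overlay plot for a 4D EPI run.
def _spikes_mask_example():
    mask_file, plot_file = spikes_mask(
        "sub-01_task-rest_bold.nii.gz",      # hypothetical 4D EPI series
        in_mask="sub-01_brainmask.nii.gz",   # optional brain mask
    )
    return mask_file, plot_file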
def _mean(inlist):
import numpy as np
return np.mean(inlist)
def _parse_tqual(in_file):
import numpy as np
with open(in_file, "r") as fin:
lines = fin.readlines()
return np.mean([float(line.strip()) for line in lines if not line.startswith("++")])
def _parse_tout(in_file):
import numpy as np
data = np.loadtxt(in_file) # pylint: disable=no-member
return data.mean()
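# --- Hedged example (not part of MRIQC) --------------------------------------
# A self-contained sketch (synthetic file contents) of the two AFNI text
# outputs parsed above: ``3dTqual`` prints one quality index per volume plus
# ``++``-prefixed comment lines, and ``3dToutcount -fraction`` prints one
# outlier fraction per volume.
def _example_parse_afni_outputs():
    import tempfile
    from pathlib import Path
    tmpdir = Path(tempfile.mkdtemp())
    tqual = tmpdir / "quality.out"
    tqual.write_text("++ 3dTqual: synthetic example\n0.05\n0.07\n0.06\n")
    tout = tmpdir / "outliers.out"
    tout.write_text("0.01\n0.00\n0.02\n")
    print(_parse_tqual(str(tqual)))  # mean of the three indices -> 0.06
    print(_parse_tout(str(tout)))    # mean outlier fraction -> 0.01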
|
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os
import shutil
import glob
import string
from bs4 import BeautifulSoup
from characters import CHARS
from vertical import VERTICAL_OFFSET
SVG_PATH = "svg"
OUT_PATH = "font"
OUT_NAME = ".txt"
if os.path.exists( SVG_PATH ):
shutil.rmtree( SVG_PATH )
os.mkdir( SVG_PATH )
if os.path.exists( OUT_PATH ):
shutil.rmtree( OUT_PATH )
os.mkdir( OUT_PATH )
def save_data( path , data ):
f = open( path , 'w+')
f.write( data )
f.close()
def get_svg():
files = glob.glob( 'svgff' + os.sep + '*.svg' )
for j in files:
name = j.split( os.sep )[1]
name = ''.join( name.split( '4th february - ' ) )
name = ''.join( name.split( 'Betatype - ' ) )
name = ''.join( name.split( 'HVD Fonts - ' ) )
name = ''.join( name.split( 'Great Lakes Lettering - ' ) )
name = ''.join( name.split( 'Lettering Inc - ' ) )
name = ''.join( name.split( 'LiebeFonts - ' ) )
name = ''.join( name.split( 'Magpie Paper Works - ' ) )
name = ''.join( name.split( 'Mika Melvas - ' ) )
name = ''.join( name.split( 'Reserves - ' ) )
name = ''.join( name.split( 'Sudtipos - ' ) )
name = ''.join( name.split( 'Typadelic - ' ) )
name = ''.join( name.split( 'Mika Melvas - ' ) )
name = ''.join( name.split( 'Emily Lime - ' ) )
name = '_'.join( name.split( '-' ) )
name = ''.join( name.split( 'Std' ) )
name = ''.join( name.split( 'JB' ) )
name = ''.join( name.split( 'MT' ) )
name = ''.join( name.split( 'ICG' ) )
name = ''.join( name.split( 'LT' ) )
name = ''.join( name.split( 'ITC' ) )
name = ''.join( name.split( 'CYR' ) )
name = ''.join( name.split( 'NF' ) )
name = name.lower()
name = '_bold.'.join( name.split( '_bd.' ) )
name = '_bolditalic.'.join( name.split( '_bdit.' ) )
name = '_italic.'.join( name.split( '_ita.' ) )
name = '_italic.'.join( name.split( '_it.' ) )
name = '_bolditalic.'.join( name.split( '_boldit.' ) )
name = ''.join( name.split( '_31ab' ) )
name = ''.join( name.split( '_smallcaps' ) )
name = ''.join( name.split( 'smallcaps' ) )
name = ''.join( name.split( '_stripescaps' ) )
name = ''.join( name.split( '_33bc' ) )
name = ''.join( name.split( '220' ) )
name = '.'.join( name.split( '_regular.' ) )
name = '_bookitalic.'.join( name.split( '_bookit.' ) )
name = '_demiitalic.'.join( name.split( '_demiit.' ) )
name = '_medium.'.join( name.split( '_med.' ) )
name = 'bayernhandschrift'.join( name.split( 'bayern' ) )
name = 'bodoniclassiccondensed_roman'.join( name.split( 'bodonclaconrom' ) )
name = 'bouncescript'.join( name.split( 'bounce script' ) )
name = 'comicsans.svg'.join( name.split( 'comic.svg' ) )
name = 'comicsans_bold.svg'.join( name.split( 'comicbd.svg' ) )
name = 'frostedillustrations'.join( name.split( 'frosted illustrations' ) )
name = 'melanylane_bold'.join( name.split( 'melanylanebold' ) )
name = 'rodeqaslab4f'.join( name.split( 'rodeqa slab 4f' ) )
name = 'spumonilp'.join( name.split( 'spumoni_lp' ) )
name = 'wendylpmedium'.join( name.split( 'wendy_lp_medium' ) )
name = 'thirstyroughbol'.join( name.split( 'thirstyroughbold' ) )
name = 'thirstyroughreg'.join( name.split( 'thirstyroughregular' ) )
name = 'mahsurisans_bold.svg'.join( name.split( 'mahsurisans_bol.svg' ) )
name = 'mrsheffield.svg'.join( name.split( 'mr sheffield.svg' ) )
name = 'naiveinline_bold.svg'.join( name.split( 'naive_inline_bold_29mars.svg' ) )
name = 'naiveinline_medium.svg'.join( name.split( 'naive_inline_regular_29mars.svg' ) )
shutil.copy( j , SVG_PATH + os.sep + name )
def uconvert( value ):
return str( ord( value ) )
def gconvert( value ):
return "".join( value.split("_") )
def svg_to_txt():
files = glob.glob( SVG_PATH + os.sep + '*.svg' )
#target = 8
#target_count = 0
font_id = ""
fonts = {}
for j in files:
#target_count = target_count + 1
#if target_count > target:
# exit()
#if target_count < target:
# continue
file_name = j[4:-4]
file_name = ' '.join( file_name.split( '_' ) )
font_id = '_'.join( file_name.split( ' ' ) )
print( file_name )
svg_data = open( j , 'r' ).read()
soup = BeautifulSoup( svg_data , 'lxml' )
out = ''
#track what font glyphs exist and only add in dependent kerning.
font_glyphs = {}
font_ligatures = {}
#OFFSET EXTRACTION
if VERTICAL_OFFSET.has_key( font_id ):
target = VERTICAL_OFFSET[ font_id ]
if target.has_key( 'top' ):
out += '0|top|' + str( target[ 'top' ] ) + '\n'
if target.has_key( 'middle' ):
out += '0|middle|' + str( target[ 'middle' ] ) + '\n'
if target.has_key( 'bottom' ):
out += '0|bottom|' + str( target[ 'bottom' ] ) + '\n'
#PROPERTY EXTRACTION
default = 0
#find missing-glyph elements
target = soup.find_all('missing-glyph')
for i in target:
if i.has_attr('horiz-adv-x'):
out += '0|missing|' + i['horiz-adv-x'] + '\n'
#find font elements
target = soup.find_all('font')
for i in target:
if i.has_attr('horiz-adv-x'):
out += '0|default|' + i['horiz-adv-x'] + '\n'
default = i['horiz-adv-x']
if i.has_attr('id'):
out += '0|id|' + i['id'] + '\n'
#find font-face elements
target = soup.find_all('font-face')
for i in target:
if i.has_attr('font-family') and i['font-family'] != "":
out += '0|family|' + i['font-family'] + '\n'
if i.has_attr('panose-1'):
out += '0|panose|' + i['panose-1'] + '\n'
if i.has_attr('descent'):
out += '0|descent|' + i['descent'] + '\n'
if i.has_attr('ascent'):
out += '0|ascent|' + i['ascent'] + '\n'
if i.has_attr('units-per-em'):
out += '0|units|' + i['units-per-em'] + '\n'
if i.has_attr('alphabetic'):
out += '0|alphabetic|' + i['alphabetic'] + '\n'
if i.has_attr('font-style'):
out += '0|font-style|' + i['font-style'] + '\n'
if i.has_attr('font-weight'):
out += '0|font-weight|' + i['font-weight'] + '\n'
if i.has_attr('font-stretch'):
out += '0|font-stretch|' + i['font-stretch'] + '\n'
if i.has_attr('cap-height'):
out += '0|cap-height|' + i['cap-height'] + '\n'
if i.has_attr('x-height'):
out += '0|x-height|' + i['x-height'] + '\n'
if i.has_attr('underline-position'):
out += '0|underline-position|' + i['underline-position'] + '\n'
if i.has_attr('underline-thickness'):
out += '0|underline-thickness|' + i['underline-thickness'] + '\n'
#GLYPH EXTRACTION
#find glyph elements
target = soup.find_all('glyph')
for i in target:
if i.has_attr('unicode'):
if len( i['unicode'] ) > 1:
unicode_str = i['unicode']
else:
unicode_str = str( ord( i['unicode'] ) )
if CHARS.has_key( unicode_str ) and CHARS[ unicode_str ] != 1:
unicode_str = CHARS[ unicode_str ]
if CHARS.has_key( unicode_str ) == False:
#print missing chars for whitelist inclusion
if i.has_attr('glyph-name'):
print( 'CHARS[ "' + unicode_str + '" ] = 1' )
else:
print( 'CHARS[ "' + unicode_str + '" ] = 1 ' )
continue
#normal chars
if CHARS[ unicode_str ] == 1:
if i.has_attr('horiz-adv-x') and i.has_attr('d'):
out += '1|' + unicode_str + '|' + i['horiz-adv-x'] + '|' + i['d'] + '\n'
elif i.has_attr('d') and i.has_attr('horiz-adv-x') == False:
out += '1|' + unicode_str + '|' + str( default ) + '|' + i['d'] + '\n'
elif i.has_attr('d') == False and i.has_attr('horiz-adv-x') == True:
out += '1|' + unicode_str + '|' + i['horiz-adv-x'] + '|\n'
font_glyphs[ unicode_str ] = 1
#ligatures
else:
if i.has_attr('d') and i.has_attr('horiz-adv-x'):
out += '1|' + CHARS[ unicode_str ] + '|' + i['horiz-adv-x'] + '|' + i['d'] + '\n'
elif i.has_attr('d') and i.has_attr('horiz-adv-x') == False:
out += '1|' + CHARS[ unicode_str ] + '|' + str( default ) + '|' + i['d'] + '\n'
elif i.has_attr('d') == False and i.has_attr('horiz-adv-x') == True:
out += '1|' + CHARS[ unicode_str ] + '|' + i['horiz-adv-x'] + '|\n'
font_glyphs[ unicode_str ] = 1
font_glyphs[ CHARS[ unicode_str ] ] = 1
font_ligatures[ CHARS[ unicode_str ] ] = 1
#KERNING EXTRACTION
target = soup.find_all('hkern')
for i in target:
#print( i )
char_1 = None
char_2 = None
char_offset = 0
if i.has_attr('u1') and i.has_attr('u2') and i.has_attr('k'):
#print( i['u1'] + ":" + i['u2'] + ":" + i['k'] )
if "," in i['u1'] and len( i['u1'] ) > 1:
char_1 = i['u1'].split( ',' )
char_1 = map( uconvert , char_1 )
else:
char_1 = map( uconvert , [ i['u1'] ] )
if "," in i['u2'] and len( i['u2'] ) > 1:
char_2 = i['u2'].split( ',' )
char_2 = map( uconvert , char_2 )
else:
char_2 = map( uconvert , [ i['u2'] ] )
char_offset = i['k']
elif i.has_attr('u1') and i.has_attr('g2') and i.has_attr('k'):
#print( i['u1'] + ":" + i['g2'] + ":" + i['k'] )
if "," in i['u1'] and len( i['u1'] ) > 1:
char_1 = i['u1'].split( ',' )
char_1 = map( uconvert , char_1 )
else:
char_1 = map( uconvert , [ i['u1'] ] )
if "," in i['g2'] and len( i['g2'] ) > 1:
char_2 = i['g2'].split( ',' )
char_2 = map( gconvert , char_2 )
else:
char_2 = map( gconvert , [ i['g2'] ] )
char_offset = i['k']
elif i.has_attr('g1') and i.has_attr('u2') and i.has_attr('k'):
#print( i['g1'] + ":" + i['u2'] + ":" + i['k'] )
if "," in i['g1'] and len( i['g1'] ) > 1:
char_1 = i['g1'].split( ',' )
char_1 = map( gconvert , char_1 )
else:
char_1 = map( gconvert , [ i['g1'] ] )
if "," in i['u2'] and len( i['u2'] ) > 1:
char_2 = i['u2'].split( ',' )
char_2 = map( uconvert , char_2 )
else:
char_2 = map( uconvert , [ i['u2'] ] )
char_offset = i['k']
elif i.has_attr('g1') and i.has_attr('g2') and i.has_attr('k'):
#print( i['g1'] + ":" + i['g2'] + ":" + i['k'] )
if "," in i['g1'] and len( i['g1'] ) > 1:
char_1 = i['g1'].split( ',' )
char_1 = map( gconvert , char_1 )
else:
char_1 = map( gconvert , [ i['g1'] ] )
if "," in i['g2'] and len( i['g2'] ) > 1:
char_2 = i['g2'].split( ',' )
char_2 = map( gconvert , char_2 )
else:
char_2 = map( gconvert , [ i['g2'] ] )
char_offset = i['k']
else:
print( "unknown kerning " )
print( i )
exit()
for j in char_1:
c_1 = j
if CHARS.has_key( c_1 ) and CHARS[ c_1 ] != 1:
c_1 = CHARS[ c_1 ]
if font_glyphs.has_key( c_1 ) == True and CHARS.has_key( c_1 ) == True:
for k in char_2:
c_2 = k
if CHARS.has_key( c_2 ) and CHARS[ c_2 ] != 1:
c_2 = CHARS[ c_2 ]
if font_glyphs.has_key( c_2 ) == True and CHARS.has_key( c_2 ) == True:
out += '2|' + c_1 + '|' + c_2 + '|' + char_offset + '\n'
out += '3'
for i in font_ligatures:
out += '|' + i
fonts[ font_id ] = True
save_data( OUT_PATH + os.sep + font_id.lower() + OUT_NAME , out.encode('utf-8') )
font_id = ""
get_svg()
svg_to_txt()
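# Hedged sketch (not part of the original conversion script): a reader for the
# pipe-delimited files written by svg_to_txt() above. The record layout is
# inferred from the writer: "0|key|value" font properties, "1|char|adv|d"
# glyph outlines, "2|c1|c2|k" kerning pairs, and a final "3|lig|lig|..."
# ligature list.
def parse_font_txt( path ):
    props = {}
    glyphs = {}
    kerning = {}
    ligatures = []
    for line in open( path , 'r' ).read().splitlines():
        parts = line.split( '|' )
        if parts[0] == '0':
            props[ parts[1] ] = parts[2]
        elif parts[0] == '1':
            glyphs[ parts[1] ] = { 'adv': parts[2] , 'd': parts[3] }
        elif parts[0] == '2':
            kerning[ ( parts[1] , parts[2] ) ] = parts[3]
        elif parts[0] == '3':
            ligatures = parts[1:]
    return props , glyphs , kerning , ligatures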
|
|
#!/usr/bin/env python
# Copyright 2018, Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testable usage examples for Google Cloud Bigtable API wrapper
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
.. note::
    This file is a work in progress and will be updated with further guidance
    from the team. Unit tests will be added with guidance from the team.
"""
import datetime
import pytest
from google.api_core.exceptions import DeadlineExceeded
from google.api_core.exceptions import NotFound
from google.api_core.exceptions import TooManyRequests
from google.api_core.exceptions import ServiceUnavailable
from test_utils.system import unique_resource_id
from test_utils.retry import RetryErrors
from google.cloud._helpers import UTC
from google.cloud.bigtable import Client
from google.cloud.bigtable import enums
UNIQUE_SUFFIX = unique_resource_id("-")
INSTANCE_ID = "snippet-tests" + UNIQUE_SUFFIX
CLUSTER_ID = "clus-1-" + UNIQUE_SUFFIX
APP_PROFILE_ID = "app-prof" + UNIQUE_SUFFIX
TABLE_ID = "tabl-1" + UNIQUE_SUFFIX
ROUTING_POLICY_TYPE = enums.RoutingPolicyType.ANY
LOCATION_ID = "us-central1-f"
ALT_LOCATION_ID = "us-central1-a"
PRODUCTION = enums.Instance.Type.PRODUCTION
SERVER_NODES = 3
STORAGE_TYPE = enums.StorageType.SSD
LABEL_KEY = u"python-snippet"
LABEL_STAMP = (
datetime.datetime.utcnow()
.replace(microsecond=0, tzinfo=UTC)
.strftime("%Y-%m-%dt%H-%M-%S")
)
LABELS = {LABEL_KEY: str(LABEL_STAMP)}
INSTANCES_TO_DELETE = []
retry_429_503 = RetryErrors((ServiceUnavailable, TooManyRequests), max_tries=9)
retry_504 = RetryErrors(DeadlineExceeded, max_tries=4)
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE = None
TABLE = None
def setup_module():
client = Config.CLIENT = Client(admin=True)
Config.INSTANCE = client.instance(
INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS
)
cluster = Config.INSTANCE.cluster(
CLUSTER_ID,
location_id=LOCATION_ID,
serve_nodes=SERVER_NODES,
default_storage_type=STORAGE_TYPE,
)
operation = Config.INSTANCE.create(clusters=[cluster])
# We want to make sure the operation completes.
operation.result(timeout=100)
Config.TABLE = Config.INSTANCE.table(TABLE_ID)
retry_504(Config.TABLE.create)()
def teardown_module():
retry_429_503(Config.INSTANCE.delete)()
for instance in INSTANCES_TO_DELETE:
try:
retry_429_503(instance.delete)()
except NotFound:
pass
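# Hedged illustration (not one of the documented snippets in this module): the
# same [START ...]/[END ...] region pattern used by the tests below, applied to
# checking that the module-level table exists. The region tag is made up and
# the leading underscore keeps pytest from collecting this as a test.
def _example_check_table_exists():
    # [START bigtable_api_example_check_table_exists]
    from google.cloud.bigtable import Client
    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    table_exists = table.exists()
    # [END bigtable_api_example_check_table_exists]
    assert table_exists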
def test_bigtable_create_instance():
# [START bigtable_api_create_prod_instance]
from google.cloud.bigtable import Client
from google.cloud.bigtable import enums
my_instance_id = "inst-my-" + UNIQUE_SUFFIX
my_cluster_id = "clus-my-" + UNIQUE_SUFFIX
location_id = "us-central1-f"
serve_nodes = 1
storage_type = enums.StorageType.SSD
production = enums.Instance.Type.PRODUCTION
labels = {"prod-label": "prod-label"}
client = Client(admin=True)
instance = client.instance(my_instance_id, instance_type=production, labels=labels)
cluster = instance.cluster(
my_cluster_id,
location_id=location_id,
serve_nodes=serve_nodes,
default_storage_type=storage_type,
)
operation = instance.create(clusters=[cluster])
# We want to make sure the operation completes.
operation.result(timeout=100)
# [END bigtable_api_create_prod_instance]
try:
assert instance.exists()
finally:
retry_429_503(instance.delete)()
def test_bigtable_create_additional_cluster():
# [START bigtable_api_create_cluster]
from google.cloud.bigtable import Client
from google.cloud.bigtable import enums
# Assuming that there is an existing instance with `INSTANCE_ID`
# on the server already.
# to create an instance see
# 'https://cloud.google.com/bigtable/docs/creating-instance'
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster_id = "clus-my-" + UNIQUE_SUFFIX
location_id = "us-central1-a"
serve_nodes = 1
storage_type = enums.StorageType.SSD
cluster = instance.cluster(
cluster_id,
location_id=location_id,
serve_nodes=serve_nodes,
default_storage_type=storage_type,
)
operation = cluster.create()
# We want to make sure the operation completes.
operation.result(timeout=100)
# [END bigtable_api_create_cluster]
try:
assert cluster.exists()
finally:
retry_429_503(cluster.delete)()
def test_bigtable_create_reload_delete_app_profile():
import re
# [START bigtable_api_create_app_profile]
from google.cloud.bigtable import Client
from google.cloud.bigtable import enums
routing_policy_type = enums.RoutingPolicyType.ANY
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
description = "routing policy-multy"
app_profile = instance.app_profile(
app_profile_id=APP_PROFILE_ID,
routing_policy_type=routing_policy_type,
description=description,
cluster_id=CLUSTER_ID,
)
app_profile = app_profile.create(ignore_warnings=True)
# [END bigtable_api_create_app_profile]
# [START bigtable_api_app_profile_name]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
app_profile = instance.app_profile(APP_PROFILE_ID)
app_profile_name = app_profile.name
# [END bigtable_api_app_profile_name]
_profile_name_re = re.compile(
r"^projects/(?P<project>[^/]+)/"
r"instances/(?P<instance>[^/]+)/"
r"appProfiles/(?P<appprofile_id>"
r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
)
assert _profile_name_re.match(app_profile_name)
# [START bigtable_api_app_profile_exists]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
app_profile = instance.app_profile(APP_PROFILE_ID)
app_profile_exists = app_profile.exists()
# [END bigtable_api_app_profile_exists]
assert app_profile_exists
# [START bigtable_api_reload_app_profile]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
app_profile = instance.app_profile(APP_PROFILE_ID)
app_profile.reload()
# [END bigtable_api_reload_app_profile]
assert app_profile.routing_policy_type == ROUTING_POLICY_TYPE
# [START bigtable_api_update_app_profile]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
app_profile = instance.app_profile(APP_PROFILE_ID)
app_profile.reload()
description = "My new app profile"
app_profile.description = description
app_profile.update()
# [END bigtable_api_update_app_profile]
assert app_profile.description == description
# [START bigtable_api_delete_app_profile]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
app_profile = instance.app_profile(APP_PROFILE_ID)
app_profile.reload()
app_profile.delete(ignore_warnings=True)
# [END bigtable_api_delete_app_profile]
assert not app_profile.exists()
def test_bigtable_list_instances():
# [START bigtable_api_list_instances]
from google.cloud.bigtable import Client
client = Client(admin=True)
(instances_list, failed_locations_list) = client.list_instances()
# [END bigtable_api_list_instances]
assert len(instances_list) > 0
def test_bigtable_list_clusters_on_instance():
# [START bigtable_api_list_clusters_on_instance]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
(clusters_list, failed_locations_list) = instance.list_clusters()
# [END bigtable_api_list_clusters_on_instance]
assert len(clusters_list) > 0
def test_bigtable_list_clusters_in_project():
# [START bigtable_api_list_clusters_in_project]
from google.cloud.bigtable import Client
client = Client(admin=True)
(clusters_list, failed_locations_list) = client.list_clusters()
# [END bigtable_api_list_clusters_in_project]
assert len(clusters_list) > 0
def test_bigtable_list_app_profiles():
app_profile = Config.INSTANCE.app_profile(
app_profile_id="app-prof-" + UNIQUE_SUFFIX,
routing_policy_type=enums.RoutingPolicyType.ANY,
)
app_profile = app_profile.create(ignore_warnings=True)
# [START bigtable_api_list_app_profiles]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
app_profiles_list = instance.list_app_profiles()
# [END bigtable_api_list_app_profiles]
try:
assert len(app_profiles_list) > 0
finally:
retry_429_503(app_profile.delete)(ignore_warnings=True)
def test_bigtable_instance_exists():
# [START bigtable_api_check_instance_exists]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance_exists = instance.exists()
# [END bigtable_api_check_instance_exists]
assert instance_exists
def test_bigtable_cluster_exists():
# [START bigtable_api_check_cluster_exists]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster = instance.cluster(CLUSTER_ID)
cluster_exists = cluster.exists()
# [END bigtable_api_check_cluster_exists]
assert cluster_exists
def test_bigtable_reload_instance():
# [START bigtable_api_reload_instance]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance.reload()
# [END bigtable_api_reload_instance]
assert instance.type_ == PRODUCTION.value
def test_bigtable_reload_cluster():
# [START bigtable_api_reload_cluster]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster = instance.cluster(CLUSTER_ID)
cluster.reload()
# [END bigtable_api_reload_cluster]
assert cluster.serve_nodes == SERVER_NODES
def test_bigtable_update_instance():
# [START bigtable_api_update_instance]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
display_name = "My new instance"
instance.display_name = display_name
instance.update()
# [END bigtable_api_update_instance]
assert instance.display_name == display_name
def test_bigtable_update_cluster():
# [START bigtable_api_update_cluster]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster = instance.cluster(CLUSTER_ID)
cluster.serve_nodes = 4
cluster.update()
# [END bigtable_api_update_cluster]
assert cluster.serve_nodes == 4
def test_bigtable_cluster_disable_autoscaling():
# [START bigtable_api_cluster_disable_autoscaling]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
# Create a cluster with autoscaling enabled
cluster = instance.cluster(
CLUSTER_ID, min_serve_nodes=1, max_serve_nodes=2, cpu_utilization_percent=10
)
instance.create(clusters=[cluster])
# Disable autoscaling
cluster.disable_autoscaling(serve_nodes=4)
# [END bigtable_api_cluster_disable_autoscaling]
assert cluster.serve_nodes == 4
def test_bigtable_create_table():
# [START bigtable_api_create_table]
from google.api_core import exceptions
from google.api_core import retry
from google.cloud.bigtable import Client
from google.cloud.bigtable import column_family
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
table = instance.table("table_my")
# Define the GC policy to retain only the most recent 2 versions.
max_versions_rule = column_family.MaxVersionsGCRule(2)
# Could include other retriable exception types
# Could configure deadline, etc.
predicate_504 = retry.if_exception_type(exceptions.DeadlineExceeded)
retry_504 = retry.Retry(predicate_504)
retry_504(table.create)(column_families={"cf1": max_versions_rule})
# [END bigtable_api_create_table]
try:
assert table.exists()
finally:
retry_429_503(table.delete)()
def test_bigtable_list_tables():
# [START bigtable_api_list_tables]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
tables_list = instance.list_tables()
# [END bigtable_api_list_tables]
# Check if returned list has expected table
table_names = [table.name for table in tables_list]
assert Config.TABLE.name in table_names
def test_bigtable_delete_cluster():
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster_id = "clus-my-" + UNIQUE_SUFFIX
serve_nodes = 1
cluster = instance.cluster(
cluster_id,
location_id=ALT_LOCATION_ID,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)
operation = cluster.create()
# We want to make sure the operation completes.
operation.result(timeout=1000)
# [START bigtable_api_delete_cluster]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster_to_delete = instance.cluster(cluster_id)
cluster_to_delete.delete()
# [END bigtable_api_delete_cluster]
assert not cluster_to_delete.exists()
def test_bigtable_delete_instance():
from google.cloud.bigtable import Client
client = Client(admin=True)
instance_id = "snipt-inst-del" + UNIQUE_SUFFIX
instance = client.instance(instance_id, instance_type=PRODUCTION, labels=LABELS)
serve_nodes = 1
cluster = instance.cluster(
"clus-to-delete" + UNIQUE_SUFFIX,
location_id=ALT_LOCATION_ID,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)
operation = instance.create(clusters=[cluster])
# We want to make sure the operation completes.
operation.result(timeout=100)
# Make sure this instance gets deleted after the test case.
INSTANCES_TO_DELETE.append(instance)
# [START bigtable_api_delete_instance]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance_to_delete = client.instance(instance_id)
instance_to_delete.delete()
# [END bigtable_api_delete_instance]
assert not instance_to_delete.exists()
# Skip deleting it during module teardown if the assertion succeeds.
INSTANCES_TO_DELETE.remove(instance)
def test_bigtable_test_iam_permissions():
# [START bigtable_api_test_iam_permissions]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance.reload()
permissions = ["bigtable.clusters.create", "bigtable.tables.create"]
permissions_allowed = instance.test_iam_permissions(permissions)
# [END bigtable_api_test_iam_permissions]
assert permissions_allowed == permissions
def test_bigtable_set_iam_policy_then_get_iam_policy():
service_account_email = Config.CLIENT._credentials.service_account_email
# [START bigtable_api_set_iam_policy]
from google.cloud.bigtable import Client
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance.reload()
new_policy = Policy()
new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)]
policy_latest = instance.set_iam_policy(new_policy)
# [END bigtable_api_set_iam_policy]
assert len(policy_latest.bigtable_admins) > 0
# [START bigtable_api_get_iam_policy]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
policy = instance.get_iam_policy()
# [END bigtable_api_get_iam_policy]
assert len(policy.bigtable_admins) > 0
def test_bigtable_project_path():
import re
# [START bigtable_api_project_path]
from google.cloud.bigtable import Client
client = Client(admin=True)
project_path = client.project_path
# [END bigtable_api_project_path]
def test_bigtable_table_data_client():
# [START bigtable_api_table_data_client]
from google.cloud.bigtable import Client
client = Client(admin=True)
table_data_client = client.table_data_client
# [END bigtable_api_table_data_client]
def test_bigtable_table_admin_client():
# [START bigtable_api_table_admin_client]
from google.cloud.bigtable import Client
client = Client(admin=True)
table_admin_client = client.table_admin_client
# [END bigtable_api_table_admin_client]
def test_bigtable_instance_admin_client():
# [START bigtable_api_instance_admin_client]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance_admin_client = client.instance_admin_client
# [END bigtable_api_instance_admin_client]
def test_bigtable_admins_policy():
service_account_email = Config.CLIENT._credentials.service_account_email
# [START bigtable_api_admins_policy]
from google.cloud.bigtable import Client
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance.reload()
new_policy = Policy()
new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)]
policy_latest = instance.set_iam_policy(new_policy)
policy = policy_latest.bigtable_admins
# [END bigtable_api_admins_policy]
assert len(policy) > 0
def test_bigtable_readers_policy():
service_account_email = Config.CLIENT._credentials.service_account_email
# [START bigtable_api_readers_policy]
from google.cloud.bigtable import Client
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance.reload()
new_policy = Policy()
new_policy[BIGTABLE_READER_ROLE] = [Policy.service_account(service_account_email)]
policy_latest = instance.set_iam_policy(new_policy)
policy = policy_latest.bigtable_readers
# [END bigtable_api_readers_policy]
assert len(policy) > 0
def test_bigtable_users_policy():
service_account_email = Config.CLIENT._credentials.service_account_email
# [START bigtable_api_users_policy]
from google.cloud.bigtable import Client
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance.reload()
new_policy = Policy()
new_policy[BIGTABLE_USER_ROLE] = [Policy.service_account(service_account_email)]
policy_latest = instance.set_iam_policy(new_policy)
policy = policy_latest.bigtable_users
# [END bigtable_api_users_policy]
assert len(policy) > 0
def test_bigtable_viewers_policy():
service_account_email = Config.CLIENT._credentials.service_account_email
# [START bigtable_api_viewers_policy]
from google.cloud.bigtable import Client
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance.reload()
new_policy = Policy()
new_policy[BIGTABLE_VIEWER_ROLE] = [Policy.service_account(service_account_email)]
policy_latest = instance.set_iam_policy(new_policy)
policy = policy_latest.bigtable_viewers
# [END bigtable_api_viewers_policy]
assert len(policy) > 0
def test_bigtable_instance_name():
import re
# [START bigtable_api_instance_name]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance_name = instance.name
# [END bigtable_api_instance_name]
def test_bigtable_cluster_name():
import re
# [START bigtable_api_cluster_name]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster = instance.cluster(CLUSTER_ID)
cluster_name = cluster.name
# [END bigtable_api_cluster_name]
def test_bigtable_instance_from_pb():
# [START bigtable_api_instance_from_pb]
from google.cloud.bigtable import Client
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
name = instance.name
instance_pb = data_v2_pb2.Instance(
name=name, display_name=INSTANCE_ID, type=PRODUCTION, labels=LABELS
)
instance2 = instance.from_pb(instance_pb, client)
# [END bigtable_api_instance_from_pb]
assert instance2.name == instance.name
def test_bigtable_cluster_from_pb():
# [START bigtable_api_cluster_from_pb]
from google.cloud.bigtable import Client
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster = instance.cluster(CLUSTER_ID)
name = cluster.name
cluster_state = cluster.state
serve_nodes = 1
cluster_pb = data_v2_pb2.Cluster(
name=name,
location=LOCATION_ID,
state=cluster_state,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)
cluster2 = cluster.from_pb(cluster_pb, instance)
# [END bigtable_api_cluster_from_pb]
assert cluster2.name == cluster.name
def test_bigtable_instance_state():
# [START bigtable_api_instance_state]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
instance_state = instance.state
# [END bigtable_api_instance_state]
assert not instance_state
def test_bigtable_cluster_state():
# [START bigtable_api_cluster_state]
from google.cloud.bigtable import Client
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster = instance.cluster(CLUSTER_ID)
cluster_state = cluster.state
# [END bigtable_api_cluster_state]
assert not cluster_state
if __name__ == "__main__":
pytest.main()
|