code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import service_configuration_lib
class TestServiceConfigurationLib:
    """Unit tests for the public helpers in ``service_configuration_lib``."""

    # Shared fixture: a fake services.yaml-style mapping covering the
    # interesting combinations of deployed_to / runs_on / env_runs_on.
    fake_service_configuration = {
        'fake_service1': {
            'deployed_to': None,
            'monitoring': {
                'fake_monitoring_key': 'fake_monitoring_value',
            },
            'deploy': {},
            'port': 11111,
            'runs_on': [
                'fake_hostname3',
                'fake_hostname2',
                'fake_hostname1',
            ],
        },
        'fake_service2': {
            'deployed_to': [
                'fake_deployed_hostname1',
                'fake_deployed_hostname2',
                'fake_hostname4',
            ],
            'monitoring': {},
            'port': 22222,
            'runs_on': [
                'fake_hostname2',
                'fake_hostname3',
                'fake_hostname4',
            ],
        },
        'fake_service3': {
            'deployed_to': None,
            'monitoring': {},
            'port': 33333,
            'runs_on': [
                'fake_hostname3',
                'fake_hostname4',
                'fake_hostname5',
            ],
            'env_runs_on': {
                'fake_env1': ['fake_hostname3'],
                'fake_env2': ['fake_hostname4', 'fake_hostname5'],
            },
            'needs_puppet_help': True,
        },
        'fake_service4': {
            'deployed_to': True,
            'runs_on': [],
            'needs_puppet_help': True,
        },
        'fake_service5': {
            'deployed_to': [],
            'runs_on': [],
            'needs_puppet_help': True,
        },
    }

    def test_generate_service_info_should_have_all_keys(self):
        """Check that generate_service_info merges keyword args into the info.

        The helper's only observable contract here is that the given
        service information is returned updated with the extra keyword
        arguments (in this case ``port``).
        """
        fake_service_information = {'fakekey2': 'fakevalue2'}
        fake_port = 9999
        actual = service_configuration_lib.generate_service_info(
            fake_service_information,
            port=fake_port,
        )
        expected = {
            # Can't use the fake_service_information because it's an
            # un-nested hash at this point
            'fakekey2': 'fakevalue2',
            'port': fake_port,
        }
        assert expected == actual

    def test_read_monitoring_should_return_empty_when_file_doesnt_exist(self):
        expected = {}
        fake_monitoring_file = 'fake_monitoring_file'
        # TODO: Mock open?
        actual = service_configuration_lib.read_monitoring(
            fake_monitoring_file,
        )
        assert expected == actual

    def test_read_deploy_should_return_empty_when_file_doesnt_exist(self):
        expected = {}
        fake_deploy_file = 'fake_deploy_file'
        # TODO: Mock open?
        actual = service_configuration_lib.read_deploy(
            fake_deploy_file,
        )
        assert expected == actual

    def test_read_smartstack_should_return_empty_when_file_doesnt_exist(self):
        expected = {}
        fake_smartstack_file = 'fake_smartstack_file'
        # TODO: Mock open?
        actual = service_configuration_lib.read_smartstack(
            fake_smartstack_file,
        )
        assert expected == actual

    def test_read_dependencies_return_empty_when_file_doesnt_exist(self):
        expected = {}
        fake_dependencies_file = 'fake_dependencies_file'
        # TODO: Mock open?
        # BUGFIX: this test previously called read_smartstack() by
        # copy-paste accident, so read_dependencies() was never exercised.
        actual = service_configuration_lib.read_dependencies(
            fake_dependencies_file,
        )
        assert expected == actual

    def test_services_that_run_on_should_properly_read_configuration(self):
        expected = ['fake_service1', 'fake_service2']
        fake_hostname = 'fake_hostname2'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.services_that_run_on(fake_hostname, fake_service_configuration)
        assert sorted(expected) == sorted(actual)

    def test_services_that_run_on_should_return_an_empty_array_when_the_hostname_isnt_anywhere(self):
        expected = []
        fake_hostname = 'non_existent_fake_hostname2'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.services_that_run_on(fake_hostname, fake_service_configuration)
        assert sorted(expected) == sorted(actual)

    def test_services_deployed_to_should_return_deployed_and_running_services(self):
        expected = ['fake_service1', 'fake_service2', 'fake_service3', 'fake_service4']
        fake_hostname = 'fake_hostname3'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.services_deployed_on(fake_hostname, fake_service_configuration)
        assert set(expected) == set(actual)

    def test_services_needing_puppet_help_on_should_properly_read_configuration(self):
        expected = ['fake_service3', 'fake_service4']
        fake_hostname = 'fake_hostname4'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.services_needing_puppet_help_on(fake_hostname, fake_service_configuration)
        assert expected == actual

    def test_all_nodes_that_run_should_properly_return_the_right_nodes(self):
        expected = ['fake_hostname3', 'fake_hostname4', 'fake_hostname5']
        fake_service = 'fake_service3'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.all_nodes_that_run(fake_service, fake_service_configuration)
        assert expected == actual

    def test_all_nodes_that_receive_removes_duplicates(self):
        # fake_hostname4 appears in both deployed_to and runs_on and must
        # show up only once in the result.
        expected = [
            'fake_deployed_hostname1',
            'fake_deployed_hostname2',
            'fake_hostname2',
            'fake_hostname3',
            'fake_hostname4',
        ]
        fake_service = 'fake_service2'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.all_nodes_that_receive(fake_service, fake_service_configuration)
        assert expected == actual

    def test_all_nodes_that_receive_with_no_deploys_to(self):
        expected = ['fake_hostname3', 'fake_hostname4', 'fake_hostname5']
        fake_service = 'fake_service3'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.all_nodes_that_receive(fake_service, fake_service_configuration)
        assert expected == actual

    def test_all_nodes_that_receive_is_sorted(self):
        expected = ['fake_hostname1', 'fake_hostname2', 'fake_hostname3']
        fake_service = 'fake_service1'
        fake_service_configuration = self.fake_service_configuration
        actual = service_configuration_lib.all_nodes_that_receive(fake_service, fake_service_configuration)
        assert expected == actual

    @mock.patch('os.path.abspath', return_value='nodir')
    @mock.patch('os.listdir', return_value=['1', '2', '3'])
    @mock.patch('service_configuration_lib.read_service_configuration_from_dir', return_value='hello')
    def test_read_services_configuration(self, read_patch, listdir_patch, abs_patch):
        expected = {'1': 'hello', '2': 'hello', '3': 'hello'}
        actual = service_configuration_lib.read_services_configuration(soa_dir='testdir')
        abs_patch.assert_called_once_with('testdir')
        listdir_patch.assert_called_once_with('nodir')
        read_patch.assert_has_calls(
            [mock.call('nodir', '1'), mock.call('nodir', '2'), mock.call('nodir', '3')],
        )
        assert expected == actual

    @mock.patch('os.path.abspath', return_value='nodir')
    @mock.patch('os.listdir', return_value=['1', '2', '3'])
    def test_list_services(self, listdir_patch, abs_patch):
        expected = ['1', '2', '3']
        actual = service_configuration_lib.list_services(soa_dir='testdir')
        abs_patch.assert_called_once_with('testdir')
        listdir_patch.assert_called_once_with('nodir')
        assert expected == actual

    def test_read_soa_metadata(self, tmpdir):
        soa_dir = tmpdir.mkdir('test_read_soa_metadata')
        metadata_file = soa_dir.join('.metadata.json')
        metadata_file.write('{"hello":"world"}')
        actual_metadata = service_configuration_lib.read_soa_metadata(soa_dir=str(soa_dir))
        assert actual_metadata == {'hello': 'world'}

    def test_read_soa_metadata_dne(self, tmpdir):
        soa_dir = tmpdir.mkdir('test_read_soa_metadata_dne')
        actual_metadata = service_configuration_lib.read_soa_metadata(soa_dir=str(soa_dir))
        assert actual_metadata == {}

    @mock.patch('service_configuration_lib.read_service_configuration_from_dir', return_value='bye')
    @mock.patch('os.path.abspath', return_value='cafe')
    def test_read_service_configuration(self, abs_patch, read_patch):
        expected = 'bye'
        actual = service_configuration_lib.read_service_configuration('boba', soa_dir='tea')
        abs_patch.assert_called_once_with('tea')
        read_patch.assert_called_once_with('cafe', 'boba')
        assert expected == actual

    @mock.patch('os.path.join', return_value='forever_joined')
    @mock.patch('service_configuration_lib.read_port', return_value='1111')
    @mock.patch('service_configuration_lib.read_monitoring', return_value='no_monitoring')
    @mock.patch('service_configuration_lib.read_deploy', return_value='no_deploy')
    @mock.patch('service_configuration_lib.read_data', return_value='no_data')
    @mock.patch('service_configuration_lib.read_smartstack', return_value={})
    @mock.patch('service_configuration_lib.read_service_information', return_value='no_info')
    @mock.patch('service_configuration_lib.read_dependencies', return_value='no_dependencies')
    @mock.patch('service_configuration_lib.generate_service_info', return_value={'oof': 'ouch'})
    def test_read_service_configuration_from_dir(
        self,
        gen_patch,
        deps_patch,
        info_patch,
        smartstack_patch,
        data_patch,
        deploy_patch,
        monitoring_patch,
        port_patch,
        join_patch,
    ):
        expected = {'oof': 'ouch'}
        actual = service_configuration_lib.read_service_configuration_from_dir('never', 'die')
        join_patch.assert_has_calls([
            mock.call('never', 'die', 'port'),
            mock.call('never', 'die', 'monitoring.yaml'),
            mock.call('never', 'die', 'deploy.yaml'),
            mock.call('never', 'die', 'data.yaml'),
            mock.call('never', 'die', 'smartstack.yaml'),
            mock.call('never', 'die', 'service.yaml'),
            mock.call('never', 'die', 'dependencies.yaml'),
        ])
        port_patch.assert_called_once_with('forever_joined')
        monitoring_patch.assert_called_once_with('forever_joined')
        deploy_patch.assert_called_once_with('forever_joined')
        data_patch.assert_called_once_with('forever_joined')
        smartstack_patch.assert_called_once_with('forever_joined')
        info_patch.assert_called_once_with('forever_joined')
        deps_patch.assert_called_once_with('forever_joined')
        gen_patch.assert_called_once_with(
            'no_info', port='1111',
            monitoring='no_monitoring',
            deploy='no_deploy',
            data='no_data',
            dependencies='no_dependencies',
            smartstack={},
        )
        assert expected == actual

    @mock.patch('os.path.join', return_value='together_forever')
    @mock.patch('os.path.abspath', return_value='real_soa_dir')
    @mock.patch('service_configuration_lib.read_yaml_file', return_value={'what': 'info'})
    def test_read_extra_service_information(self, info_patch, abs_patch, join_patch):
        expected = {'what': 'info'}
        actual = service_configuration_lib.read_extra_service_information(
            'noname',
            'noinfo', soa_dir='whatsadir',
        )
        abs_patch.assert_called_once_with('whatsadir')
        join_patch.assert_called_once_with('real_soa_dir', 'noname', 'noinfo.yaml')
        info_patch.assert_called_once_with('together_forever', deepcopy=True)
        assert expected == actual

    # NOTE: the three methods below were named ``testread_yaml_file_*``;
    # renamed with the conventional ``test_`` prefix for consistency with
    # the rest of the class (pytest collects either form).
    @mock.patch('io.open', autospec=True)
    @mock.patch('service_configuration_lib.load_yaml', return_value={'data': 'mock'})
    def test_read_yaml_file_single(self, load_patch, open_patch):
        expected = {'data': 'mock'}
        filename = 'fake_fname_uno'
        actual = service_configuration_lib.read_yaml_file(filename)
        open_patch.assert_called_once_with(filename, 'r', encoding='UTF-8')
        load_patch.assert_called_once_with(open_patch.return_value.__enter__().read())
        assert expected == actual

    @mock.patch('io.open', autospec=True)
    @mock.patch('service_configuration_lib.load_yaml', return_value={'mmmm': 'tests'})
    def test_read_yaml_file_with_cache(self, load_patch, open_patch):
        expected = {'mmmm': 'tests'}
        filename = 'fake_fname_dos'
        service_configuration_lib.enable_yaml_cache()
        actual = service_configuration_lib.read_yaml_file(filename)
        actual_two = service_configuration_lib.read_yaml_file(filename)
        # Only one open/parse despite two reads: the second hit the cache.
        open_patch.assert_called_once_with(filename, 'r', encoding='UTF-8')
        load_patch.assert_called_once_with(open_patch.return_value.__enter__().read())
        assert expected == actual
        assert expected == actual_two
        # When we cache, we can NOT return a pointer to the original object
        # because the caller might mutate it. We need to ensure that
        # the returned object is a copy.
        assert expected is not actual_two

    @mock.patch('io.open', autospec=True)
    @mock.patch('service_configuration_lib.load_yaml', return_value={'water': 'slide'})
    def test_read_yaml_file_no_cache(self, load_patch, open_patch):
        expected = {'water': 'slide'}
        filename = 'fake_fname_tres'
        service_configuration_lib.disable_yaml_cache()
        actual = service_configuration_lib.read_yaml_file(filename)
        actual_two = service_configuration_lib.read_yaml_file(filename)
        open_patch.assert_any_call(filename, 'r', encoding='UTF-8')
        assert open_patch.call_count == 2
        load_patch.assert_any_call(open_patch.return_value.__enter__().read())
        assert load_patch.call_count == 2
        assert expected == actual
        assert expected == actual_two

    def test_env_runs_on(self):
        expected = ['fake_hostname3']
        actual = service_configuration_lib.all_nodes_that_run_in_env(
            'fake_service3',
            'fake_env1',
            service_configuration=self.fake_service_configuration,
        )
        assert expected == actual
        expected = ['fake_hostname4', 'fake_hostname5']
        actual = service_configuration_lib.all_nodes_that_run_in_env(
            'fake_service3',
            'fake_env2',
            service_configuration=self.fake_service_configuration,
        )
        assert expected == actual

    def test_bad_port_get_service_from_port(self):
        """Test for bad inputs."""
        service_name = service_configuration_lib.get_service_from_port(None)
        assert service_name is None
        service_name = service_configuration_lib.get_service_from_port({})
        assert service_name is None

    def test_valid_port_get_service_from_port(self):
        """Test that if there is a service for that port it returns it."""
        all_services = {
            'Other Service': {
                'port': 2352,
            },
            'Service 23': {
                'port': 656,
            },
            'Test Service': {
                'port': 100,
            },
            'Smart Service': {
                'port': 345,
                'smartstack': {
                    'main': {
                        'proxy_port': 3444,
                    },
                },
            },
            'Service 36': {
                'port': 636,
            },
        }
        found_service_name = service_configuration_lib.get_service_from_port(100, all_services)
        assert found_service_name == 'Test Service'
        # Smartstack proxy_port entries must be matched as well.
        found_service_name = service_configuration_lib.get_service_from_port(3444, all_services)
        assert found_service_name == 'Smart Service'
| Yelp/service_configuration_lib | tests/service_configuration_lib_test.py | Python | apache-2.0 | 17,123 |
# Programar em Python #05 - Operadores de Atribuição
# (Programming in Python #05 - Assignment Operators)
#
# Demonstrates Python's augmented assignment operators. The trailing
# semicolons of the original were removed: they are legal but not
# idiomatic Python.
a = b = c = d = e = f = 2

a += 3   # 2 + 3  -> 5
b -= 2   # 2 - 2  -> 0
c *= 2   # 2 * 2  -> 4
d /= 2   # 2 / 2  -> 1.0 (true division always yields a float)
e %= 2   # 2 % 2  -> 0
f **= 2  # 2 ** 2 -> 4

print('Valor de a:', a)
print('Valor de b:', b)
print('Valor de c:', c)
print('Valor de d:', d)
print('Valor de e:', e)
print('Valor de f:', f)
"""Utility file for building Qt ui files."""
import sys
import os
from hashlib import md5
import json
from glob import glob

# Rebuild ui_<name>.py from each changed *.ui file, tracking file hashes in
# hashes.json so unchanged files are skipped.

# Use the .bat wrapper only on real Windows platforms.
# BUGFIX: the original tested `'win' in sys.platform`, which also matched
# macOS ('darwin') and would have picked the Windows command there.
if sys.platform.startswith('win'):
    cmd = 'pyuic4.bat'
else:
    cmd = 'pyuic4'

hash_file = 'hashes.json'

# Load the previously recorded hashes; start fresh when the file is missing
# or unparsable. (Replaces a bare `except:` that swallowed every error, and
# closes the file handle via a context manager.)
try:
    with open(hash_file, 'r') as f:
        hashes = json.load(f)
except (OSError, IOError, ValueError):
    hashes = {}

for fname in glob('*.ui'):
    pre = fname.split('.')[0]
    ui_py_fname = 'ui_' + pre + '.py'
    # Hash the raw bytes: md5() requires bytes on Python 3, and binary mode
    # avoids platform-dependent newline translation skewing the digest.
    with open(fname, 'rb') as f:
        new_hash = md5(f.read()).hexdigest()
    if hashes.get(fname, None) != new_hash:
        full_cmd = cmd + ' ' + fname + ' -o ' + ui_py_fname
        print(full_cmd)
        os.system(full_cmd)
        hashes[fname] = new_hash

with open(hash_file, 'w') as output:
    json.dump(hashes, output, indent=4, sort_keys=True)
| mivade/qCamera | viewer/build_uis.py | Python | bsd-2-clause | 735 |
'''
Created on Jan 8, 2014
@author: xapharius
'''
from abc import ABCMeta, abstractmethod
class AbstractDataProcessor(object):
    """Base class defining the data-processor interface.

    A data processor receives the raw data handed to the map phase of the
    engine and prepares it so the model can learn from it. Concrete
    subclasses for a given data class must implement
    :meth:`normalize_data` and :meth:`get_data_set`.
    """

    # Python 2 style abstract-class declaration, kept as-is for
    # compatibility with the rest of this code base.
    __metaclass__ = ABCMeta

    def __init__(self):
        """Create the processor; no data is attached until set_data()."""

    def set_data(self, raw_data):
        """Attach the raw data that later processing steps operate on."""
        self.raw_data = raw_data

    @abstractmethod
    def normalize_data(self, stats):
        """Normalize the local data using the preprocessor's statistics."""
        pass

    @abstractmethod
    def get_data_set(self):
        """Package and return the processed data.

        :return: data-class specific DataSet
        """
        pass
| xapharius/mrEnsemble | Engine/src/datahandler/AbstractDataProcessor.py | Python | mit | 913 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_filter
short_description: Manages top level filter objects (vz:Filter)
description:
- Manages top level filter objects on Cisco ACI fabrics.
- This module does not manage filter entries, see M(aci_filter_entry) for this functionality.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(vz:Filter) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
filter:
description:
- The name of the filter.
required: yes
aliases: [ filter_name, name ]
description:
description:
- Description for the filter.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new filter to a tenant
aci_filter:
host: apic
username: admin
password: SomeSecretPassword
filter: web_filter
description: Filter for web protocols
tenant: production
state: present
- name: Remove a filter for a tenant
aci_filter:
host: apic
username: admin
password: SomeSecretPassword
filter: web_filter
tenant: production
state: absent
- name: Query a filter of a tenant
aci_filter:
host: apic
username: admin
password: SomeSecretPassword
filter: web_filter
tenant: production
state: query
- name: Query all filters for a tenant
aci_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point: create, delete or query a vz:Filter object."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        filter=dict(type='str', required=False, aliases=['name', 'filter_name']),  # Not required for querying all objects
        tenant=dict(type='str', required=False, aliases=['tenant_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Tenant and filter name are mandatory unless we only query.
        required_if=[
            ['state', 'absent', ['filter', 'tenant']],
            ['state', 'present', ['filter', 'tenant']],
        ],
    )

    state = module.params['state']
    tenant = module.params['tenant']
    filter_name = module.params['filter']
    description = module.params['description']

    aci = ACIModule(module)

    # Anchor the request URL at the tenant, then descend into the filter.
    tenant_class = dict(
        aci_class='fvTenant',
        aci_rn='tn-{0}'.format(tenant),
        filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
        module_object=tenant,
    )
    filter_class = dict(
        aci_class='vzFilter',
        aci_rn='flt-{0}'.format(filter_name),
        filter_target='eq(vzFilter.name, "{0}")'.format(filter_name),
        module_object=filter_name,
    )
    aci.construct_url(root_class=tenant_class, subclass_1=filter_class)

    aci.get_existing()

    if state == 'present':
        # Push only the difference between desired and existing config.
        aci.payload(
            aci_class='vzFilter',
            class_config=dict(
                name=filter_name,
                descr=description,
            ),
        )
        aci.get_diff(aci_class='vzFilter')
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| hryamzik/ansible | lib/ansible/modules/network/aci/aci_filter.py | Python | gpl-3.0 | 6,994 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio user management and authentication."""
from __future__ import absolute_import, print_function
import os
import pkg_resources
import six
from flask import current_app, request_finished, session
from flask_kvsession import KVSessionExtension
from flask_login import LoginManager, user_logged_in, user_logged_out
from flask_principal import AnonymousIdentity
from flask_security import Security, changeable, recoverable, registerable, \
utils
from invenio_db import db
from passlib.registry import register_crypt_handler
from werkzeug.utils import cached_property, import_string
from invenio_accounts.forms import confirm_register_form_factory, \
login_form_factory, register_form_factory
from . import config
from .datastore import SessionAwareSQLAlchemyUserDatastore
from .hash import InvenioAesEncryptedEmail, _to_binary
from .models import Role, User
from .sessions import login_listener, logout_listener
from .utils import obj_or_import_string, set_session_info
def get_hmac(password):
    """Bypass Flask-Security's default MAC signing of plain passwords.

    Flask-Security normally HMAC-signs the plain password before hashing;
    Invenio delegates all hashing work to the passlib context instead, so
    this override simply hands back the password encoded as bytes.

    :param password: The plain password.
    :returns: The password as a binary string.
    """
    binary_password = _to_binary(password)
    return binary_password
def hash_password(password):
    """Hash a plain password with the application's passlib context.

    Overrides Flask-Security's default hashing function so that hashing is
    controlled entirely by the configured passlib password context.

    :param password: The plain password.
    :returns: The hashed password.
    """
    pwd_context = current_app.extensions['security'].pwd_context
    return pwd_context.hash(password)
class InvenioAccounts(object):
    """Invenio-Accounts extension."""

    def __init__(self, app=None, sessionstore=None):
        """Extension initialization.

        :param app: The Flask application.
        :param sessionstore: store for sessions. Passed to
            ``flask-kvsession``. Defaults to redis.
        """
        self.security = Security()
        self.datastore = None
        if app:
            self.init_app(app, sessionstore=sessionstore)

    @staticmethod
    def monkey_patch_flask_security():
        """Monkey-patch Flask-Security.

        Replaces Flask-Security's password helpers with the module-level
        ``get_hmac``/``hash_password`` overrides and disables login paths
        that would bypass session-activity tracking.
        """
        # The equality guards make the patching idempotent across multiple
        # init_app() calls.
        if utils.get_hmac != get_hmac:
            utils.get_hmac = get_hmac
        if utils.hash_password != hash_password:
            utils.hash_password = hash_password
            changeable.hash_password = hash_password
            recoverable.hash_password = hash_password
            registerable.hash_password = hash_password

        # Disable remember me cookie generation as it does not work with
        # session activity tracking (a remember me token will bypass revoking
        # of a session).
        def patch_do_nothing(*args, **kwargs):
            pass
        LoginManager._set_cookie = patch_do_nothing

        # Disable loading user from headers and object because we want to be
        # sure we can load user only through the login form.
        def patch_reload_anonym(self, *args, **kwargs):
            self.reload_user()
        LoginManager._load_from_header = patch_reload_anonym
        LoginManager._load_from_request = patch_reload_anonym

    @cached_property
    def jwt_decode_factory(self):
        """Load default JWT verification factory."""
        return obj_or_import_string(
            current_app.config.get('ACCOUNTS_JWT_DECODE_FACTORY'))

    @cached_property
    def jwt_creation_factory(self):
        """Load default JWT creation factory."""
        return obj_or_import_string(
            current_app.config.get('ACCOUNTS_JWT_CREATION_FACTORY'))

    def register_anonymous_identity_loader(self, state):
        """Registers a loader for AnonymousIdentity.

        Additional loader is necessary for applying a need 'any-user' to
        AnonymousUser in the invenio-access module
        """
        # Attention: the order of the loaders is important
        # append is used here instead of decorator to enforce the order
        state.principal.identity_loaders.append(AnonymousIdentity)

    def init_app(self, app, sessionstore=None, register_blueprint=True):
        """Flask application initialization.

        The following actions are executed:

        #. Initialize the configuration.
        #. Monkey-patch Flask-Security.
        #. Create the user datastore.
        #. Create the sessionstore.
        #. Initialize the extension, the forms to register users and
           confirms their emails, the CLI and, if ``ACCOUNTS_USE_CELERY`` is
           ``True``, register a celery task to send emails.

        :param app: The Flask application.
        :param sessionstore: store for sessions. Passed to
            ``flask-kvsession``. If ``None`` then Redis is configured.
            (Default: ``None``)
        :param register_blueprint: If ``True``, the application registers the
            blueprints. (Default: ``True``)
        """
        self.init_config(app)

        # Monkey-patch Flask-Security (must happen before Security.init_app
        # so the patched helpers are the ones wired into the extension).
        InvenioAccounts.monkey_patch_flask_security()

        # Create user datastore
        if not self.datastore:
            self.datastore = SessionAwareSQLAlchemyUserDatastore(
                db, User, Role)

        if app.config['ACCOUNTS_SESSION_ACTIVITY_ENABLED']:
            self._enable_session_activity(app=app)

        # Initialize extension.
        # An explicit ACCOUNTS_REGISTER_BLUEPRINT config value overrides the
        # register_blueprint argument passed in by the caller.
        _register_blueprint = app.config.get('ACCOUNTS_REGISTER_BLUEPRINT')
        if _register_blueprint is not None:
            register_blueprint = _register_blueprint

        state = self.security.init_app(app, datastore=self.datastore,
                                       register_blueprint=register_blueprint)
        self.register_anonymous_identity_loader(state)

        # Replace the default Flask-Security forms with the Invenio ones.
        app.extensions['security'].register_form = register_form_factory(
            app.extensions['security'].register_form, app)

        app.extensions['security'].confirm_register_form = \
            confirm_register_form_factory(
                app.extensions['security'].confirm_register_form, app
            )

        app.extensions['security'].login_form = login_form_factory(
            app.extensions['security'].login_form, app)

        if app.config['ACCOUNTS_USE_CELERY']:
            from invenio_accounts.tasks import send_security_email

            # Route security emails through Celery instead of sending inline.
            @state.send_mail_task
            def delay_security_email(msg):
                send_security_email.delay(msg.__dict__)

        # Register context processor
        if app.config['ACCOUNTS_JWT_DOM_TOKEN']:
            from invenio_accounts.context_processors.jwt import \
                jwt_proccessor
            app.context_processor(jwt_proccessor)

        # Register signal receiver
        if app.config.get('ACCOUNTS_USERINFO_HEADERS'):
            request_finished.connect(set_session_info, app)

        # Set Session KV store
        session_kvstore_factory = obj_or_import_string(
            app.config['ACCOUNTS_SESSION_STORE_FACTORY'])
        session_kvstore = session_kvstore_factory(app)
        self.kvsession_extension = KVSessionExtension(
            session_kvstore, app)

        app.extensions['invenio-accounts'] = self

    def init_config(self, app):
        """Initialize configuration.

        :param app: The Flask application.
        """
        # Default ACCOUNTS_USE_CELERY to True only when celery is installed
        # and the app is not in debug/testing mode.
        try:
            pkg_resources.get_distribution('celery')
            app.config.setdefault(
                "ACCOUNTS_USE_CELERY", not (app.debug or app.testing))
        except pkg_resources.DistributionNotFound:  # pragma: no cover
            app.config.setdefault("ACCOUNTS_USE_CELERY", False)

        # Register Invenio legacy password hashing
        register_crypt_handler(InvenioAesEncryptedEmail)

        # Change Flask defaults
        app.config.setdefault(
            'SESSION_COOKIE_SECURE',
            not app.debug
        )

        # Change Flask-Security defaults
        app.config.setdefault(
            'SECURITY_PASSWORD_SALT',
            app.config['SECRET_KEY']
        )

        # Set JWT secret key, falling back to the application's SECRET_KEY.
        app.config.setdefault(
            'ACCOUNTS_JWT_SECRET_KEY',
            app.config.get(
                'ACCOUNTS_JWT_SECRET_KEY',
                app.config.get('SECRET_KEY')
            )
        )

        # Copy every ACCOUNTS*/SECURITY_* default from the config module
        # unless the app already sets it.
        config_apps = ['ACCOUNTS', 'SECURITY_']
        for k in dir(config):
            if any([k.startswith(prefix) for prefix in config_apps]):
                app.config.setdefault(k, getattr(config, k))

    def _enable_session_activity(self, app):
        """Enable session activity."""
        user_logged_in.connect(login_listener, app)
        user_logged_out.connect(logout_listener, app)
        # Imported lazily to avoid circular imports at module load time.
        from .views.settings import blueprint
        from .views.security import security, revoke_session
        blueprint.route('/security/', methods=['GET'])(security)
        blueprint.route('/sessions/revoke/', methods=['POST'])(revoke_session)
class InvenioAccountsREST(InvenioAccounts):
    """Invenio-Accounts extension for REST-only applications.

    Behaves exactly like :class:`InvenioAccounts` except that the UI
    blueprints are not registered unless explicitly requested.
    """

    def init_app(self, app, sessionstore=None, register_blueprint=False):
        """Flask application initialization.

        :param app: The Flask application.
        :param sessionstore: store for sessions. Passed to
            ``flask-kvsession``. If ``None`` then Redis is configured.
            (Default: ``None``)
        :param register_blueprint: If ``True``, the application registers the
            blueprints. (Default: ``False``)
        """
        parent = super(InvenioAccountsREST, self)
        return parent.init_app(
            app,
            sessionstore=sessionstore,
            register_blueprint=register_blueprint,
        )
class InvenioAccountsUI(InvenioAccounts):
    """Invenio-Accounts extension for UI applications.

    In addition to the base extension, every request's session is marked
    permanent so that ``PERMANENT_SESSION_LIFETIME`` governs its lifetime.
    """

    def init_app(self, app, sessionstore=None, register_blueprint=True):
        """Flask application initialization.

        :param app: The Flask application.
        :param sessionstore: store for sessions. Passed to
            ``flask-kvsession``. If ``None`` then Redis is configured.
            (Default: ``None``)
        :param register_blueprint: If ``True``, the application registers the
            blueprints. (Default: ``True``)
        """
        self.make_session_permanent(app)
        parent = super(InvenioAccountsUI, self)
        return parent.init_app(
            app,
            sessionstore=sessionstore,
            register_blueprint=register_blueprint,
        )

    def make_session_permanent(self, app):
        """Make sessions permanent by default.

        Set ``PERMANENT_SESSION_LIFETIME`` to specify the time-to-live.
        """
        @app.before_request
        def make_session_permanent():
            session.permanent = True
| inspirehep/invenio-accounts | invenio_accounts/ext.py | Python | mit | 10,664 |
""" __
/ _) <( ** basic_utils *** )
.-^^^-/ /
__/ /
<__.|_|-|_|
"""
from .core import * # noqa
from .date_helpers import * # noqa
from .dict_helpers import * # noqa
from .primitives import * # noqa
from .seq_helpers import * # noqa
__version__ = '1.6.0'
| Jackevansevo/basic-utils | basic_utils/__init__.py | Python | mit | 310 |
from django.conf.urls import (
patterns, url, include
)
from django.core.urlresolvers import (reverse_lazy)
from django.views.generic.base import RedirectView
from django.conf import settings
from django.contrib import admin
# Populate Django's admin registry from every installed app's admin.py.
admin.autodiscover()

# Class-based views used below; the remaining routes reference function
# views by dotted-path strings (pre-Django-1.8 ``patterns()`` style).
from .views import (
    PackageUpdate,
    ServerCreate,
    # FarmServerList
)

urlpatterns = patterns('',
    # Third-party / admin UIs.
    url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
    url(r'^admin/', include(admin.site.urls)),

    # Initial setup wizard (root redirects into it).
    url(r'^$', RedirectView.as_view(url=reverse_lazy('setup'), permanent=True)),
    url(r'^setup/$', 'rds.views.setup', name='setup'),
    url(r'^setup/ad/external/$', 'rds.views.ad_setup', name='ad_setup'),
    # url(r'^setup/ad/type/$', 'rds.views.ad_type', name='ad_type'),
    url(r'^setup/cancel/$', 'rds.views.cancel', name='cancel'),
    # url(r'^setup/server/$', 'rds.views.server_setup', name='server_setup'),

    # Software / package management.
    url(r'^software/$', RedirectView.as_view(url=reverse_lazy('package_list_redirect'), permanent=True), name='software'),
    url(r'^software/store/$', 'rds.views.software_cloud', name='software_cloud'),
    url(r'^software/local/$', 'rds.views.package_list_redirect', name='package_list_redirect'),
    url(r'^software/local/(?P<pk>\d+)/$', 'rds.views.package_list', name='package_list'),
    url(r'^software/local/add/$', 'rds.views.package_add', name='package_add'),
    url(r'^software/local/delete/(?P<pk>\d+)/$', 'rds.views.package_delete', name='package_delete'),
    url(r'^software/local/edit/(?P<pk>\d+)/$', PackageUpdate.as_view(), name='package_update'),
    url(r'^software/local/install/(?P<pk>\d+)/$', 'rds.views.package_install', name='package_install'),
    url(r'^software/local/uninstall/(?P<pk>\d+)/$', 'rds.views.package_uninstall', name='package_uninstall'),
    url(r'^software/server/packages/$', 'rds.views.server_package_list', name='server_package_list'),
    url(r'^software/server/applications/$', 'rds.views.applications', name='applications'),
    url(r'^software/server/applications/refresh/$', 'rds.views.applications_refresh', name='applications_refresh'),

    # Farm management.
    url(r'^farms/$', 'rds.views.farm_list', name='farm_list'),
    url(r'^farms/add/$', 'rds.views.farm_add', name='farm_add'),
    url(r'^farms/(?P<pk>\d+)/$', 'rds.views.farm_show', name='farm_show'),
    url(r'^farms/(?P<pk>\d+)/clone/$', 'rds.views.farm_clone', name='farm_clone'),
    url(r'^farms/(?P<pk>\d+)/delete/$', 'rds.views.farm_delete', name='farm_delete'),
    url(r'^farms/(?P<pk>\d+)/deployment/$', 'rds.views.farm_deployment', name='farm_deployment'),
    url(r'^farms/(?P<pk>\d+)/software/$', 'rds.views.farm_package_list', name='farm_package_list'),
    url(r'^farms/(?P<pk>\d+)/install_packages/$', 'rds.views.farm_install_packages',name='farm_install_packages'),
    url(r'^farms/(?P<pk>\d+)/setup/$', 'rds.views.farm_setup', name='farm_setup'),
    url(r'^farms/package/(?P<farm_package_pk>\d+)/delete/$', 'rds.views.farm_package_delete', name='farm_package_delete'),
    url(r'^farms/(?P<farm_pk>\d+)/package/(?P<package_pk>\d+)/add/$', 'rds.views.farm_package_add', name='farm_package_add'),
    # url(r'^farms/deployment/', FarmServerList.as_view(), name='deployment'),
    url(r'^farms/deployment/publish/(?P<pk>\d+)/$', 'rds.views.deployment_publish', name='deployment_publish'),
    url(r'^farms/deployment/unpublish/(?P<pk>\d+)/$', 'rds.views.deployment_unpublish', name='deployment_unpublish'),

    # Servers and JSON API endpoints (consumed by provisioned machines).
    url(r'^server/(?P<pk>\d+)/delete/', 'rds.views.server_delete', name='server_delete'),
    url(r'^api/join/$', 'rds.views.join', name='api_join'),
    url(r'^api/server/(?P<pk>\d+)/rdp/settings.rdp', 'rds.views.rdp_settings', name='rdp_settings'),
    url(r'^api/server/create/$', ServerCreate.as_view(), name='server_create'),
)
| jakobadam/origo-desktops | rds/urls.py | Python | gpl-3.0 | 4,635 |
# Sphinx configuration for the django-projects documentation build.
import os, sys, projects

# Source files use the ``.txt`` suffix; the root document is ``contents``.
source_suffix = '.txt'
master_doc = 'contents'

# Project metadata shown in the rendered documentation.
project = 'django-projects'
copyright = 'Kyle Fuller and Lithum contributors'

# Pull the version straight from the package so docs never drift from code.
version = projects.__version__
release = version

# Date format used for |today| substitutions.
today_fmt = '%B %d, %Y'

# Cross-reference rendering: show ``func()`` parentheses, hide module prefixes.
add_function_parentheses = True
add_module_names = False
# coding: utf-8
from application.common import BaseHandler
from ..templates import environment
class UserSettings(BaseHandler):
    """Serve the admin "user settings" page."""

    def get(self):
        """Render the user-settings template and write it to the response."""
        page = environment.get_template('user_settings.htm')
        self.response.write(page.render())
| paulsnar/edaemon | application/admin/routes/user_settings.py | Python | bsd-3-clause | 262 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AuthTestTidRequest(AbstractModel):
    """Request body for the AuthTestTid API."""

    def __init__(self):
        r"""
        :param Data: Encrypted payload produced by the device-side SDK after
            the test TID parameters have been filled in.
        :type Data: str
        """
        self.Data = None

    def _deserialize(self, params):
        self.Data = params.get("Data")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class AuthTestTidResponse(AbstractModel):
    """Response body for the AuthTestTid API."""

    def __init__(self):
        r"""
        :param Pass: Authentication result.
        :type Pass: bool
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.Pass = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field from the raw response dict.
        for attr in ("Pass", "RequestId"):
            setattr(self, attr, params.get(attr))
class BurnTidNotifyRequest(AbstractModel):
    """Request body for the BurnTidNotify API."""

    def __init__(self):
        r"""
        :param OrderId: Order number.
        :type OrderId: str
        :param Tid: TID code.
        :type Tid: str
        """
        self.OrderId = None
        self.Tid = None

    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Tid = params.get("Tid")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class BurnTidNotifyResponse(AbstractModel):
    """Response body for the BurnTidNotify API."""

    def __init__(self):
        r"""
        :param Tid: TID whose burn receipt was accepted.
        :type Tid: str
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.Tid = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field from the raw response dict.
        for attr in ("Tid", "RequestId"):
            setattr(self, attr, params.get(attr))
class DeliverTidNotifyRequest(AbstractModel):
    """Request body for the DeliverTidNotify API."""

    def __init__(self):
        r"""
        :param OrderId: Order number.
        :type OrderId: str
        :param Tid: TID code.
        :type Tid: str
        """
        self.OrderId = None
        self.Tid = None

    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Tid = params.get("Tid")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DeliverTidNotifyResponse(AbstractModel):
    """Response body for the DeliverTidNotify API."""

    def __init__(self):
        r"""
        :param RemaindCount: Remaining over-the-air delivery quota.
        :type RemaindCount: int
        :param Tid: TID whose delivery receipt was accepted.
        :type Tid: str
        :param ProductKey: Product public key.
        :type ProductKey: str
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.RemaindCount = None
        self.Tid = None
        self.ProductKey = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field from the raw response dict.
        for attr in ("RemaindCount", "Tid", "ProductKey", "RequestId"):
            setattr(self, attr, params.get(attr))
class DeliverTidsRequest(AbstractModel):
    """Request body for the DeliverTids API."""

    def __init__(self):
        r"""
        :param OrderId: Order ID.
        :type OrderId: str
        :param Quantity: Number of TIDs to deliver, 1~100.
        :type Quantity: int
        """
        self.OrderId = None
        self.Quantity = None

    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Quantity = params.get("Quantity")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DeliverTidsResponse(AbstractModel):
    """Response body for the DeliverTids API."""

    def __init__(self):
        r"""
        :param TidSet: Delivered TID records.
            Note: this field may return null, meaning no valid value.
        :type TidSet: list of TidKeysInfo
        :param ProductKey: Product public key.
        :type ProductKey: str
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.TidSet = None
        self.ProductKey = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_tids = params.get("TidSet")
        if raw_tids is not None:
            # Hydrate each raw dict into a TidKeysInfo model.
            self.TidSet = []
            for raw in raw_tids:
                info = TidKeysInfo()
                info._deserialize(raw)
                self.TidSet.append(info)
        self.ProductKey = params.get("ProductKey")
        self.RequestId = params.get("RequestId")
class DescribeAvailableLibCountRequest(AbstractModel):
    """Request body for the DescribeAvailableLibCount API."""

    def __init__(self):
        r"""
        :param OrderId: Order number.
        :type OrderId: str
        """
        self.OrderId = None

    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeAvailableLibCountResponse(AbstractModel):
    """Response body for the DescribeAvailableLibCount API."""

    def __init__(self):
        r"""
        :param Quantity: Number of white-box keys still available for delivery.
        :type Quantity: int
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.Quantity = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field from the raw response dict.
        for attr in ("Quantity", "RequestId"):
            setattr(self, attr, params.get(attr))
class DescribePermissionRequest(AbstractModel):
    """Request body for the DescribePermission API (takes no parameters)."""
class DescribePermissionResponse(AbstractModel):
    """Response body for the DescribePermission API."""

    def __init__(self):
        r"""
        :param EnterpriseUser: Whether the caller is an enterprise user.
        :type EnterpriseUser: bool
        :param DownloadPermission: Console download permission.
        :type DownloadPermission: str
        :param UsePermission: Console usage permission.
        :type UsePermission: str
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.EnterpriseUser = None
        self.DownloadPermission = None
        self.UsePermission = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field from the raw response dict.
        for attr in ("EnterpriseUser", "DownloadPermission",
                     "UsePermission", "RequestId"):
            setattr(self, attr, params.get(attr))
class DownloadTidsRequest(AbstractModel):
    """Request body for the DownloadTids API."""

    def __init__(self):
        r"""
        :param OrderId: Order number.
        :type OrderId: str
        :param Quantity: Number of TIDs to download, 1~10.
        :type Quantity: int
        """
        self.OrderId = None
        self.Quantity = None

    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Quantity = params.get("Quantity")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DownloadTidsResponse(AbstractModel):
    """Response body for the DownloadTids API."""

    def __init__(self):
        r"""
        :param TidSet: Downloaded TID records.
            Note: this field may return null, meaning no valid value.
        :type TidSet: list of TidKeysInfo
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.TidSet = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_tids = params.get("TidSet")
        if raw_tids is not None:
            # Hydrate each raw dict into a TidKeysInfo model.
            self.TidSet = []
            for raw in raw_tids:
                info = TidKeysInfo()
                info._deserialize(raw)
                self.TidSet.append(info)
        self.RequestId = params.get("RequestId")
class TidKeysInfo(AbstractModel):
    """System-generated TID and key material."""

    def __init__(self):
        r"""
        :param Tid: TID number.
        :type Tid: str
        :param PublicKey: Public key.
        :type PublicKey: str
        :param PrivateKey: Private key.
        :type PrivateKey: str
        :param Psk: Pre-shared key.
        :type Psk: str
        :param DownloadUrl: Download URL for the software-hardened white-box key.
        :type DownloadUrl: str
        :param DeviceCode: Software-hardened device identifier code.
        :type DeviceCode: str
        """
        self.Tid = None
        self.PublicKey = None
        self.PrivateKey = None
        self.Psk = None
        self.DownloadUrl = None
        self.DeviceCode = None

    def _deserialize(self, params):
        for attr in ("Tid", "PublicKey", "PrivateKey",
                     "Psk", "DownloadUrl", "DeviceCode"):
            setattr(self, attr, params.get(attr))
        # Warn about keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class UploadDeviceUniqueCodeRequest(AbstractModel):
    """Request body for the UploadDeviceUniqueCode API."""

    def __init__(self):
        r"""
        :param CodeSet: Hardware unique identifier codes.
        :type CodeSet: list of str
        :param OrderId: Order number the hardware codes are bound to.
        :type OrderId: str
        """
        self.CodeSet = None
        self.OrderId = None

    def _deserialize(self, params):
        self.CodeSet = params.get("CodeSet")
        self.OrderId = params.get("OrderId")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class UploadDeviceUniqueCodeResponse(AbstractModel):
    """Response body for the UploadDeviceUniqueCode API."""

    def __init__(self):
        r"""
        :param Count: Number of codes uploaded in this call.
        :type Count: int
        :param ExistedCodeSet: Duplicate hardware unique codes.
            Note: this field may return null, meaning no valid value.
        :type ExistedCodeSet: list of str
        :param LeftQuantity: Remaining upload quota.
        :type LeftQuantity: int
        :param IllegalCodeSet: Malformed hardware unique codes.
            Note: this field may return null, meaning no valid value.
        :type IllegalCodeSet: list of str
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.Count = None
        self.ExistedCodeSet = None
        self.LeftQuantity = None
        self.IllegalCodeSet = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field from the raw response dict.
        for attr in ("Count", "ExistedCodeSet", "LeftQuantity",
                     "IllegalCodeSet", "RequestId"):
            setattr(self, attr, params.get(attr))
class VerifyChipBurnInfoRequest(AbstractModel):
    """Request body for the VerifyChipBurnInfo API."""

    def __init__(self):
        r"""
        :param Data: Verification data.
        :type Data: str
        """
        self.Data = None

    def _deserialize(self, params):
        self.Data = params.get("Data")
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            # Message text (typo included) kept identical to the generated SDK.
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class VerifyChipBurnInfoResponse(AbstractModel):
    """Response body for the VerifyChipBurnInfo API."""

    def __init__(self):
        r"""
        :param Pass: Verification result.
        :type Pass: bool
        :param VerifiedTimes: Number of verifications already performed.
        :type VerifiedTimes: int
        :param LeftTimes: Number of verifications remaining.
        :type LeftTimes: int
        :param RequestId: Unique request ID returned with every response;
            quote it when reporting issues.
        :type RequestId: str
        """
        self.Pass = None
        self.VerifiedTimes = None
        self.LeftTimes = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field from the raw response dict.
        for attr in ("Pass", "VerifiedTimes", "LeftTimes", "RequestId"):
            setattr(self, attr, params.get(attr))
def update_soa(record):
    """Bump the serial of the SOA attached to *record*'s domain, if any.

    Walks record -> domain -> soa; when the full chain is present the SOA's
    serial is incremented, the record is flagged dirty, and it is saved.
    Silently does nothing when any link in the chain is missing/falsy.
    """
    domain = record.domain if record else None
    soa = domain.soa if domain else None
    if soa:
        soa.serial += 1
        soa.dirty = True
        soa.save()
| akeym/cyder | cyder/cydns/soa/utils.py | Python | bsd-3-clause | 327 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_application_lb
short_description: Manage an Application load balancer
description:
- Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details.
version_added: "2.4"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
access_logs_enabled:
description:
- "Whether or not to enable access logs. When true, I(access_logs_s3_bucket) must be set."
required: false
type: bool
access_logs_s3_bucket:
description:
- The name of the S3 bucket for the access logs. This attribute is required if access logs in Amazon S3 are enabled. The bucket must exist in the same
region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.
required: false
access_logs_s3_prefix:
description:
- The prefix for the location in the S3 bucket. If you don't specify a prefix, the access logs are stored in the root of the bucket.
required: false
deletion_protection:
description:
- Indicates whether deletion protection for the ELB is enabled.
required: false
default: no
type: bool
http2:
description:
- Indicates whether to enable HTTP2 routing.
required: false
default: no
type: bool
version_added: 2.6
idle_timeout:
description:
- The number of seconds to wait before an idle connection is closed.
required: false
default: 60
listeners:
description:
- A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys
are CamelCased.
required: false
name:
description:
- The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
characters or hyphens, and must not begin or end with a hyphen.
required: true
purge_listeners:
description:
- If yes, existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. If the I(listeners) parameter is
not set then listeners will not be modified
default: yes
type: bool
purge_tags:
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
tags will not be modified.
required: false
default: yes
type: bool
subnets:
description:
- A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from
at least two Availability Zones. Required if state=present.
required: false
security_groups:
description:
- A list of the names or IDs of the security groups to assign to the load balancer. Required if state=present.
required: false
default: []
scheme:
description:
- Internet-facing or internal load balancer. An ELB scheme can not be modified after creation.
required: false
default: internet-facing
choices: [ 'internet-facing', 'internal' ]
state:
description:
- Create or destroy the load balancer.
required: true
choices: [ 'present', 'absent' ]
tags:
description:
- A dictionary of one or more tags to assign to the load balancer.
required: false
wait:
description:
- Wait for the load balancer to have a state of 'active' before completing. A status check is
performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
default: no
type: bool
version_added: 2.6
wait_timeout:
description:
- The time in seconds to use in conjunction with I(wait).
version_added: 2.6
extends_documentation_fragment:
- aws
- ec2
notes:
- Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
- Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ELB and attach a listener
- elb_application_lb:
name: myelb
security_groups:
- sg-12345678
- my-sec-group
subnets:
- subnet-012345678
- subnet-abcdef000
listeners:
- Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
Port: 80 # Required. The port on which the load balancer is listening.
# The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
SslPolicy: ELBSecurityPolicy-2015-05
Certificates: # The ARN of the certificate (only one certficate ARN should be provided)
- CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
DefaultActions:
- Type: forward # Required. Only 'forward' is accepted at this time
TargetGroupName: # Required. The name of the target group
state: present
# Create an ELB and attach a listener with logging enabled
- elb_application_lb:
access_logs_enabled: yes
access_logs_s3_bucket: mybucket
access_logs_s3_prefix: "/logs"
name: myelb
security_groups:
- sg-12345678
- my-sec-group
subnets:
- subnet-012345678
- subnet-abcdef000
listeners:
- Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
Port: 80 # Required. The port on which the load balancer is listening.
# The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
SslPolicy: ELBSecurityPolicy-2015-05
Certificates: # The ARN of the certificate (only one certficate ARN should be provided)
- CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
DefaultActions:
- Type: forward # Required. Only 'forward' is accepted at this time
TargetGroupName: # Required. The name of the target group
state: present
# Create an ALB with listeners and rules
- elb_application_lb:
name: test-alb
subnets:
- subnet-12345678
- subnet-87654321
security_groups:
- sg-12345678
scheme: internal
listeners:
- Protocol: HTTPS
Port: 443
DefaultActions:
- Type: forward
TargetGroupName: test-target-group
Certificates:
- CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
SslPolicy: ELBSecurityPolicy-2015-05
Rules:
- Conditions:
- Field: path-pattern
Values:
- '/test'
Priority: '1'
Actions:
- TargetGroupName: test-target-group
Type: forward
state: present
# Remove an ELB
- elb_application_lb:
name: myelb
state: absent
'''
RETURN = '''
access_logs_s3_bucket:
description: The name of the S3 bucket for the access logs.
returned: when state is present
type: string
sample: mys3bucket
access_logs_s3_enabled:
description: Indicates whether access logs stored in Amazon S3 are enabled.
returned: when state is present
type: string
sample: true
access_logs_s3_prefix:
description: The prefix for the location in the S3 bucket.
returned: when state is present
type: string
sample: /my/logs
availability_zones:
description: The Availability Zones for the load balancer.
returned: when state is present
type: list
sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
canonical_hosted_zone_id:
description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
returned: when state is present
type: string
sample: ABCDEF12345678
created_time:
description: The date and time the load balancer was created.
returned: when state is present
type: string
sample: "2015-02-12T02:14:02+00:00"
deletion_protection_enabled:
description: Indicates whether deletion protection is enabled.
returned: when state is present
type: string
sample: true
dns_name:
description: The public DNS name of the load balancer.
returned: when state is present
type: string
sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
idle_timeout_timeout_seconds:
description: The idle timeout value, in seconds.
returned: when state is present
type: string
sample: 60
ip_address_type:
description: The type of IP addresses used by the subnets for the load balancer.
returned: when state is present
type: string
sample: ipv4
listeners:
description: Information about the listeners.
returned: when state is present
type: complex
contains:
listener_arn:
description: The Amazon Resource Name (ARN) of the listener.
returned: when state is present
type: string
sample: ""
load_balancer_arn:
description: The Amazon Resource Name (ARN) of the load balancer.
returned: when state is present
type: string
sample: ""
port:
description: The port on which the load balancer is listening.
returned: when state is present
type: int
sample: 80
protocol:
description: The protocol for connections from clients to the load balancer.
returned: when state is present
type: string
sample: HTTPS
certificates:
description: The SSL server certificate.
returned: when state is present
type: complex
contains:
certificate_arn:
description: The Amazon Resource Name (ARN) of the certificate.
returned: when state is present
type: string
sample: ""
ssl_policy:
description: The security policy that defines which ciphers and protocols are supported.
returned: when state is present
type: string
sample: ""
default_actions:
description: The default actions for the listener.
returned: when state is present
type: string
contains:
type:
description: The type of action.
returned: when state is present
type: string
sample: ""
target_group_arn:
description: The Amazon Resource Name (ARN) of the target group.
returned: when state is present
type: string
sample: ""
load_balancer_arn:
description: The Amazon Resource Name (ARN) of the load balancer.
returned: when state is present
type: string
sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
load_balancer_name:
description: The name of the load balancer.
returned: when state is present
type: string
sample: my-elb
routing_http2_enabled:
description: Indicates whether HTTP/2 is enabled.
returned: when state is present
type: string
sample: true
scheme:
description: Internet-facing or internal load balancer.
returned: when state is present
type: string
sample: internal
security_groups:
description: The IDs of the security groups for the load balancer.
returned: when state is present
type: list
sample: ['sg-0011223344']
state:
description: The state of the load balancer.
returned: when state is present
type: dict
sample: "{'code': 'active'}"
tags:
description: The tags attached to the load balancer.
returned: when state is present
type: dict
sample: "{
'Tag': 'Example'
}"
type:
description: The type of load balancer.
returned: when state is present
type: string
sample: application
vpc_id:
description: The ID of the VPC for the load balancer.
returned: when state is present
type: string
sample: vpc-0011223344
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, camel_dict_to_snake_dict, ec2_argument_spec, \
boto3_tag_list_to_ansible_dict, compare_aws_tags, HAS_BOTO3
from ansible.module_utils.aws.elbv2 import ApplicationLoadBalancer, ELBListeners, ELBListener, ELBListenerRules, ELBListenerRule
from ansible.module_utils.aws.elb_utils import get_elb_listener_rules
def create_or_update_elb(elb_obj):
    """Create the ALB if absent, otherwise reconcile it with the desired state.

    Synchronises subnets, security groups, tags, load-balancer attributes,
    listeners and listener rules (in that order), then exits the Ansible
    module via ``exit_json`` with the snake_cased ELB facts.

    :param elb_obj: an ``ApplicationLoadBalancer`` wrapper holding the boto3
        connection, the Ansible module and the desired parameters.
    """
    if elb_obj.elb:
        # ELB exists so check subnets, security groups and tags match what has been passed

        # Subnets
        if not elb_obj.compare_subnets():
            elb_obj.modify_subnets()

        # Security Groups
        if not elb_obj.compare_security_groups():
            elb_obj.modify_security_groups()

        # Tags - only need to play with tags if tags parameter has been set to something
        if elb_obj.tags is not None:

            # Delete necessary tags
            tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
                                                                boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
            if tags_to_delete:
                elb_obj.delete_tags(tags_to_delete)

            # Add/update tags
            if tags_need_modify:
                elb_obj.modify_tags()

    else:
        # Create load balancer
        elb_obj.create_elb()

    # ELB attributes (idle timeout, deletion protection, access logs, http2)
    elb_obj.update_elb_attributes()
    elb_obj.modify_elb_attributes()

    # Listeners: diff desired vs. current, keyed on port
    listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])

    listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()

    # Delete listeners
    for listener_to_delete in listeners_to_delete:
        listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
        listener_obj.delete()
        listeners_obj.changed = True

    # Add listeners
    for listener_to_add in listeners_to_add:
        listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
        listener_obj.add()
        listeners_obj.changed = True

    # Modify listeners
    for listener_to_modify in listeners_to_modify:
        listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
        listener_obj.modify()
        listeners_obj.changed = True

    # If listeners changed, mark ELB as changed
    if listeners_obj.changed:
        elb_obj.changed = True

    # Rules of each listener: diffed by priority
    for listener in listeners_obj.listeners:
        if 'Rules' in listener:
            rules_obj = ELBListenerRules(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port'])

            rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules()

            # Delete rules
            for rule in rules_to_delete:
                rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn)
                rule_obj.delete()
                elb_obj.changed = True

            # Add rules
            for rule in rules_to_add:
                rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
                rule_obj.create()
                elb_obj.changed = True

            # Modify rules
            for rule in rules_to_modify:
                rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
                rule_obj.modify()
                elb_obj.changed = True

    # Get the ELB again (refresh state after all modifications)
    elb_obj.update()

    # Get the ELB listeners again
    listeners_obj.update()

    # Update the ELB attributes
    elb_obj.update_elb_attributes()

    # Convert to snake_case and merge in everything we want to return to the user
    snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
    snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
    snaked_elb['listeners'] = []
    for listener in listeners_obj.current_listeners:
        # For each listener, get listener rules
        listener['rules'] = get_elb_listener_rules(elb_obj.connection, elb_obj.module, listener['ListenerArn'])
        snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))

    # Change tags to ansible friendly dict
    snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags'])

    elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb)
def delete_elb(elb_obj):
    """Delete the load balancer if it exists and exit with the change status.

    :param elb_obj: an ``ApplicationLoadBalancer`` wrapper; ``elb_obj.elb``
        is falsy when the balancer does not exist (making deletion a no-op).
    """
    if elb_obj.elb:
        elb_obj.delete()

    elb_obj.module.exit_json(changed=elb_obj.changed)
def main():
    """Module entry point: declare the argument spec, validate listener
    options that the spec cannot express, then create/update or delete
    the application load balancer."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            access_logs_enabled=dict(type='bool'),
            access_logs_s3_bucket=dict(type='str'),
            access_logs_s3_prefix=dict(type='str'),
            deletion_protection=dict(type='bool'),
            http2=dict(type='bool'),
            idle_timeout=dict(type='int'),
            listeners=dict(
                type='list',
                elements='dict',
                options=dict(
                    Protocol=dict(type='str', required=True),
                    Port=dict(type='int', required=True),
                    SslPolicy=dict(type='str'),
                    Certificates=dict(type='list'),
                    DefaultActions=dict(type='list', required=True),
                    Rules=dict(type='list')
                )
            ),
            name=dict(required=True, type='str'),
            purge_listeners=dict(default=True, type='bool'),
            purge_tags=dict(default=True, type='bool'),
            subnets=dict(type='list'),
            security_groups=dict(type='list'),
            scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
            state=dict(choices=['present', 'absent'], type='str'),
            tags=dict(type='dict'),
            wait_timeout=dict(type='int'),
            wait=dict(default=False, type='bool')
        )
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'present', ['subnets', 'security_groups'])
        ],
        required_together=(
            ['access_logs_enabled', 'access_logs_s3_bucket', 'access_logs_s3_prefix']
        )
    )
    # HTTPS listeners need TLS settings that the argument spec cannot
    # enforce conditionally, so check them by hand before touching AWS.
    listeners = module.params.get("listeners")
    for listener in listeners or []:
        if listener.get('Protocol') != 'HTTPS':
            continue
        if listener.get('SslPolicy') is None:
            module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")
        if listener.get('Certificates') is None:
            module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS")
    connection = module.client('elbv2')
    connection_ec2 = module.client('ec2')
    elb = ApplicationLoadBalancer(connection, connection_ec2, module)
    if module.params.get("state") == 'present':
        create_or_update_elb(elb)
    else:
        delete_elb(elb)
# Standard Ansible module pattern: run the entry point only when this file
# is executed directly (each module runs as its own script on the target).
if __name__ == '__main__':
    main()
| tareqalayan/ansible | lib/ansible/modules/cloud/amazon/elb_application_lb.py | Python | gpl-3.0 | 21,179 |
#!/usr/bin/python
import argparse
import sys
import logging
import subprocess
from pdftableextract.core import process_page, output
import pdftableextract.core
#-----------------------------------------------------------------------
def procargs():
    """Build the command-line parser for pdf-table-extract and parse sys.argv.

    Returns the argparse.Namespace; only -p (page) is required.
    All flags, destinations and defaults are unchanged from the original;
    only user-facing help-string typos are corrected.
    """
    p = argparse.ArgumentParser(description="Finds tables in a PDF page.")
    p.add_argument("-i", dest='infile', help="input file")
    p.add_argument("-o", dest='outfile', help="output file", default=None,
                   type=str)
    p.add_argument("--greyscale_threshold", "-g", help="grayscale threshold (%%)", type=int, default=25)
    p.add_argument("-p", type=str, dest='page', required=True, action="append",
                   help="a page in the PDF to process, as page[:firstrow:lastrow].")
    p.add_argument("-c", type=str, dest='crop',
                   help="crop to left:top:right:bottom. Paints white outside this "
                        "rectangle.")
    p.add_argument("--line_length", "-l", type=float, default=0.17,
                   help="line length threshold (length)")
    p.add_argument("--bitmap_resolution", "-r", type=int, default=300,
                   help="resolution of internal bitmap (dots per length unit)")
    p.add_argument("-name", help="name to add to XML tag, or HTML comments")
    p.add_argument("-pad", help="initial image padding (pixels)", type=int,
                   default=2)
    p.add_argument("-white", action="append",
                   help="paint white to the bitmap as left:top:right:bottom in length units. "
                        "Done before painting black")
    p.add_argument("-black", action="append",
                   help="paint black to the bitmap as left:top:right:bottom in length units. "
                        "Done after painting white")
    p.add_argument("-bitmap", action="store_true",
                   help="Dump working bitmap not debugging image.")
    p.add_argument("-checkcrop", action="store_true",
                   help="Stop after finding cropping rectangle, and output debugging "
                        "image (use -bitmap).")
    p.add_argument("-checklines", action="store_true",
                   help="Stop after finding lines, and output debugging image.")
    p.add_argument("-checkdivs", action="store_true",
                   help="Stop after finding dividers, and output debugging image.")
    p.add_argument("-checkcells", action="store_true",
                   help="Stop after finding cells, and output debugging image.")
    p.add_argument("-colmult", type=float, default=1.0,
                   help="color cycling multiplier for checkcells and chtml")
    p.add_argument("-boxes", action="store_true",
                   help="Just output cell corners, don't send cells to pdftotext.")
    p.add_argument("-t", choices=['cells_csv', 'cells_json', 'cells_xml',
                                  'table_csv', 'table_html', 'table_chtml', 'table_list'],
                   default="cells_xml",
                   help="output type (table_chtml is colorized like '-checkcells') "
                        "(default cells_xml)")
    p.add_argument("--whitespace", "-w", choices=['none', 'normalize', 'raw'], default="normalize",
                   help="What to do with whitespace in cells. none = remove it all, "
                        "normalize (default) = any whitespace (including CRLF) replaced "
                        "with a single space, raw = do nothing.")
    p.add_argument("--traceback", "--backtrace", "-tb", "-bt", action="store_true")
    return p.parse_args()
def main():
    """Script entry point: parse arguments and run extraction.

    Expected failures are converted into clean ``sys.exit`` messages
    unless --traceback was requested, in which case they are re-raised.
    """
    # BUG FIX: `args` was referenced inside the except handlers even when
    # procargs() itself raised, producing a NameError instead of the
    # intended message. Pre-initialize it and guard every check.
    args = None
    try:
        args = procargs()
        imain(args)
    except IOError as e:
        if args is not None and args.traceback:
            raise
        sys.exit("I/O Error running pdf-table-extract: {0}".format(e))
    except OSError as e:
        print("An OS Error occurred running pdf-table-extract: Is `pdftoppm` installed and available?")
        if args is not None and args.traceback:
            raise
        sys.exit("OS Error: {0}".format(e))
    except subprocess.CalledProcessError as e:
        if args is not None and args.traceback:
            raise
        sys.exit("Error while checking a subprocess call: {0}".format(e))
    except Exception as e:
        if args is not None and args.traceback:
            raise
        sys.exit(e)
def imain(args):
    """Run table extraction for every requested page.

    When any -check* debug flag is set, process_page only produces
    debugging output; otherwise the detected cells are collected and
    written via output() in the format selected by -t.
    """
    # Both branches pass the same per-page options; build them once.
    page_options = dict(
        bitmap=args.bitmap,
        checkcrop=args.checkcrop,
        checklines=args.checklines,
        checkdivs=args.checkdivs,
        checkcells=args.checkcells,
        whitespace=args.whitespace,
        boxes=args.boxes,
        greyscale_threshold=args.greyscale_threshold,
        page=args.page,
        crop=args.crop,
        line_length=args.line_length,
        bitmap_resolution=args.bitmap_resolution,
        name=args.name,
        pad=args.pad,
        white=args.white,
        black=args.black,
    )
    cells = []
    debug_requested = (args.checkcrop or args.checklines
                       or args.checkdivs or args.checkcells)
    if debug_requested:
        for pgs in args.page:
            process_page(args.infile, pgs, outfilename=args.outfile,
                         **page_options)
    else:
        for pgs in args.page:
            cells.extend(process_page(args.infile, pgs, **page_options))
    if args.outfile is None:
        args.outfile = sys.stdout
    # output() takes the destination via a '<type>_filename' keyword.
    filenames = {"{0}_filename".format(args.t): args.outfile}
    output(cells, args.page, name=args.name, infile=args.infile,
           output_type=args.t, **filenames)
# BUG FIX: main() was previously called unconditionally at import time,
# which ran argument parsing whenever this module was imported.
if __name__ == '__main__':
    main()
| PiWare/kicad_library | script/extract-table.py | Python | gpl-2.0 | 5,681 |
from os.path import abspath, dirname, join
class EvidenceCode:
    """A single term of the Evidence Code Ontology (ECO).

    Instances form a graph: ``parents``/``children`` hold resolved is_a
    links to other EvidenceCode objects.
    """

    def __init__(self, id=None):
        self.id = id                # ECO identifier, e.g. "ECO:0000001"
        self.name = None            # human-readable term name
        self.definition = None      # definition string from the .obo file
        self.synonyms = set()       # raw synonym lines
        self.parents = set()        # terms this one is_a
        self.children = set()       # terms that are is_a this one

    def __str__(self):
        return "Eco: {id}\n" \
               "Name: {name}\n" \
               "Definition: {definition}\n".format(id=self.id, name=self.name, definition=self.definition)


def parse_eco(path):
    """Parse the evidence codes provided by https://github.com/evidenceontology/evidenceontology

    Returns a dict mapping ECO id -> EvidenceCode with is_a relations
    resolved into parent/child links. Obsolete terms are skipped.
    """
    all_ecos = dict()
    relationships = []
    with open(path, "r") as open_file:
        new_eco = None
        is_obsolete = False
        for line in open_file:
            line = line.strip()
            if "[Term]" in line:
                # Commit the previous term (unless obsolete), start a new one.
                if new_eco is not None and not is_obsolete:
                    all_ecos[new_eco.id] = new_eco
                new_eco = EvidenceCode()
                is_obsolete = False
                continue
            elif "[Typedef]" in line:
                # Typedef stanzas end term parsing; commit the pending term.
                if new_eco is not None and not is_obsolete:
                    all_ecos[new_eco.id] = new_eco
                new_eco = None
                is_obsolete = False
                continue
            elif new_eco is None:
                continue

            split_line = line.split(" ")
            if split_line[0] == "id:":
                new_eco.id = split_line[1]
            elif split_line[0] == "name:":
                new_eco.name = " ".join(split_line[1:])
            elif split_line[0] == "def:":
                new_eco.definition = " ".join(split_line[1:])
            elif split_line[0] == "is_a:":
                relationships.append((split_line[1], new_eco.id))
            elif split_line[0] == "synonym:":
                new_eco.synonyms.add(" ".join(split_line[1:]))
            elif split_line[0] == "is_obsolete:":
                is_obsolete = True

    # BUG FIX: the final term of the file was previously stored even when
    # marked obsolete; apply the same obsolete check as inside the loop.
    if new_eco is not None and not is_obsolete:
        all_ecos[new_eco.id] = new_eco

    # Link parents to children, skipping links that mention skipped
    # (obsolete) terms — following them would raise KeyError.
    for parent_id, child_id in relationships:
        if parent_id in all_ecos and child_id in all_ecos:
            parent = all_ecos[parent_id]
            child = all_ecos[child_id]
            parent.children.add(child)
            child.parents.add(parent)
    return all_ecos
all_ecos = parse_eco(abspath(join(dirname(abspath(__file__)), "eco.obo"))) | JuBra/GEMEditor | GEMEditor/evidence/eco_parser.py | Python | gpl-3.0 | 2,322 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.storage.drive_enclosures import DriveEnclosures
from hpOneView.resources.resource import ResourceClient
class DriveEnclosuresTest(unittest.TestCase):
    """Verify that DriveEnclosures forwards every call to ResourceClient
    with the expected arguments."""

    DRIVE_ENCLOSURE_ID = "SN123101"
    DRIVE_ENCLOSURE_URI = "/rest/drive-enclosures/" + DRIVE_ENCLOSURE_ID

    def setUp(self):
        self.host = '127.0.0.1'
        self.connection = connection(self.host)
        self._drive_enclosures = DriveEnclosures(self.connection)

    @mock.patch.object(ResourceClient, 'get_all')
    def test_get_all_called_once(self, mock_get_all):
        """Paging, filter and sort arguments are passed straight through."""
        requested_filter = 'name=TestName'
        requested_sort = 'name:ascending'
        self._drive_enclosures.get_all(2, 500, requested_filter, requested_sort)
        mock_get_all.assert_called_once_with(
            start=2, count=500, filter=requested_filter, sort=requested_sort)

    @mock.patch.object(ResourceClient, 'get_all')
    def test_get_all_called_once_with_default(self, mock_get_all):
        """Calling get_all with no arguments uses the documented defaults."""
        self._drive_enclosures.get_all()
        mock_get_all.assert_called_once_with(start=0, count=-1, filter='', sort='')

    @mock.patch.object(ResourceClient, 'get')
    def test_get_by_id_called_once(self, mock_get):
        """get() looks the resource up by its id."""
        self._drive_enclosures.get(self.DRIVE_ENCLOSURE_ID)
        mock_get.assert_called_once_with(id_or_uri=self.DRIVE_ENCLOSURE_ID)

    @mock.patch.object(ResourceClient, 'get_by')
    def test_get_by_called_once(self, mock_get_by):
        """get_by() forwards the field/value pair unchanged."""
        requested_field = 'serialNumber'
        requested_value = 'SN123101'
        self._drive_enclosures.get_by(requested_field, requested_value)
        mock_get_by.assert_called_once_with(field=requested_field, value=requested_value)

    @mock.patch.object(ResourceClient, 'build_uri')
    @mock.patch.object(ResourceClient, 'get')
    def test_get_port_map_called_once(self, mock_get, mock_build_uri):
        """get_port_map() appends the port-map path to the resource URI."""
        mock_build_uri.return_value = self.DRIVE_ENCLOSURE_URI
        self._drive_enclosures.get_port_map(self.DRIVE_ENCLOSURE_ID)
        mock_get.assert_called_once_with(
            id_or_uri=self.DRIVE_ENCLOSURE_URI + DriveEnclosures.PORT_MAP_PATH)

    @mock.patch.object(ResourceClient, 'build_uri')
    @mock.patch.object(ResourceClient, 'update')
    def test_refresh_state_called_once(self, mock_update, mock_build_uri):
        """refresh_state() PUTs the configuration to the refresh-state URI."""
        refresh_config = dict(refreshState="RefreshPending")
        mock_build_uri.return_value = self.DRIVE_ENCLOSURE_URI
        self._drive_enclosures.refresh_state(
            id_or_uri=self.DRIVE_ENCLOSURE_ID, configuration=refresh_config)
        mock_update.assert_called_once_with(
            uri=self.DRIVE_ENCLOSURE_URI + DriveEnclosures.REFRESH_STATE_PATH,
            resource=refresh_config, timeout=-1)

    @mock.patch.object(ResourceClient, 'patch')
    def test_patch_called_once(self, mock_patch):
        """patch() passes the patch document through with default timeout."""
        patch_config = dict(
            id_or_uri=self.DRIVE_ENCLOSURE_URI,
            operation="replace",
            path="/powerState",
            value="Off"
        )
        self._drive_enclosures.patch(**patch_config)
        mock_patch.assert_called_once_with(timeout=-1, **patch_config)
| HewlettPackard/python-hpOneView | tests/unit/resources/storage/test_drive_enclosures.py | Python | mit | 4,218 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flexget.utils import json
from flexget.api import empty_response
from flexget.plugins.api.movie_list import ObjectsContainer as OC
class TestMovieListAPI(object):
    """API tests for the movie_list endpoints.

    BUG FIX: several checks were written as ``assert a, b == c`` which
    parses ``b == c`` as the assert *message*, so the identifier values
    were never actually verified (and ``dict.items()[0]`` would raise
    TypeError on Python 3 anyway). They are rewritten as real membership
    asserts below.
    """
    config = 'tasks: {}'

    def test_movie_list_list(self, api_client, schema_match):
        """Listing, creating and re-listing movie lists."""
        # No params
        rsp = api_client.get('/movie_list/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.return_lists, data)
        assert not errors
        assert data['movie_lists'] == []
        # Named param
        rsp = api_client.get('/movie_list/?name=name')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.return_lists, data)
        assert not errors
        payload = {'name': 'test'}
        # Create list
        rsp = api_client.json_post('/movie_list/', data=json.dumps(payload))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.list_object, data)
        assert not errors
        values = {
            'name': 'test',
            'id': 1
        }
        for field, value in values.items():
            assert data.get(field) == value
        rsp = api_client.get('/movie_list/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.return_lists, data)
        assert not errors
        for field, value in values.items():
            assert data['movie_lists'][0].get(field) == value

    def test_movie_list_list_id(self, api_client, schema_match):
        """Create, fetch and delete a list by id."""
        payload = {'name': 'test'}
        # Create list
        rsp = api_client.json_post('/movie_list/', data=json.dumps(payload))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.list_object, data)
        assert not errors
        values = {
            'name': 'test',
            'id': 1
        }
        for field, value in values.items():
            assert data.get(field) == value
        # Get list
        rsp = api_client.get('/movie_list/1/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.list_object, data)
        assert not errors
        for field, value in values.items():
            assert data.get(field) == value
        # Delete list
        rsp = api_client.delete('/movie_list/1/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(empty_response, data)
        assert not errors

    def test_movie_list_movies(self, api_client, schema_match):
        """Add a movie (name only) to a list and fetch the list's movies."""
        payload = {'name': 'name'}
        # Create list
        rsp = api_client.json_post('/movie_list/', data=json.dumps(payload))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        movie_data = {'movie_name': 'title'}
        # Add movie to list
        rsp = api_client.json_post('/movie_list/1/movies/', data=json.dumps(movie_data))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.movie_list_object, data)
        assert not errors
        # Get movies from list
        rsp = api_client.get('/movie_list/1/movies/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code

    def test_movie_list_movies_with_identifiers(self, api_client, schema_match):
        """Identifiers sent on creation are returned when listing movies."""
        payload = {'name': 'name'}
        # Create list
        rsp = api_client.json_post('/movie_list/', data=json.dumps(payload))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        identifier = {'imdb_id': 'tt1234567'}
        movie_data = {'movie_name': 'title',
                      'movie_identifiers': [identifier]}
        # Add movie to list
        rsp = api_client.json_post('/movie_list/1/movies/', data=json.dumps(movie_data))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.movie_list_object, data)
        assert not errors
        # Get movies from list
        rsp = api_client.get('/movie_list/1/movies/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.return_movies, data)
        assert not errors
        returned_identifier = data['movies'][0]['movies_list_ids'][0]
        # BUG FIX: was `assert a, b == c`, which never checked anything.
        assert (returned_identifier['id_name'], returned_identifier['id_value']) in identifier.items()

    def test_movie_list_movie(self, api_client, schema_match):
        """Full CRUD cycle for a single movie inside a list."""
        payload = {'name': 'name'}
        # Create list
        rsp = api_client.json_post('/movie_list/', data=json.dumps(payload))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        identifier = {'imdb_id': 'tt1234567'}
        movie_data = {'movie_name': 'title',
                      'movie_identifiers': [identifier]}
        # Add movie to list
        rsp = api_client.json_post('/movie_list/1/movies/', data=json.dumps(movie_data))
        assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
        # Get specific movie from list
        rsp = api_client.get('/movie_list/1/movies/1/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.movie_list_object, data)
        assert not errors
        returned_identifier = data['movies_list_ids'][0]
        # BUG FIX: was `assert a, b == c`, which never checked anything.
        assert (returned_identifier['id_name'], returned_identifier['id_value']) in identifier.items()
        identifiers = [{'trakt_movie_id': '12345'}]
        # Change specific movie from list
        rsp = api_client.json_put('/movie_list/1/movies/1/', data=json.dumps(identifiers))
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.movie_list_object, data)
        assert not errors
        returned_identifier = data['movies_list_ids'][0]
        # BUG FIX: was `assert a, b == c`, which never checked anything.
        assert (returned_identifier['id_name'], returned_identifier['id_value']) in identifiers[0].items()
        # Delete specific movie from list
        rsp = api_client.delete('/movie_list/1/movies/1/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(empty_response, data)
        assert not errors
        # Get non existent movie from list
        rsp = api_client.get('/movie_list/1/movies/1/')
        assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code
        # Delete non existent movie from list
        rsp = api_client.delete('/movie_list/1/movies/1/')
        assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code
| oxc/Flexget | flexget/tests/test_movie_list_api.py | Python | mit | 7,622 |
"""
Shared utilities
"""
import numpy as np
__all__ = ['batchify', 'holdout', 'preprocess', 'inner']
def holdout(batches, frac=0.1):
"""
Take a list and split it into train and test sets
"""
batches = list(batches)
num_holdout = int(np.round(len(batches) * frac))
test = batches[:num_holdout]
train = batches[num_holdout:]
return train, test
def batchify(t, n, randomize=True):
    """Yield batches of at most n indices covering range(t).

    Indices are shuffled first unless randomize is False; the final batch
    may be shorter than n.
    """
    order = np.arange(t)
    if randomize:
        np.random.shuffle(order)
    for start in range(0, t, n):
        yield order[start:start + n]
def preprocess(stimulus, history, zscore=(0., 1.)):
    """Normalize the stimulus and attach a rolling history window.

    zscore is a (mean, scale) pair applied as (stim - mean) / scale; the
    normalized array is passed through rolling_window with time along the
    first axis.
    """
    mean, scale = zscore
    stim = np.array(stimulus).astype('float')
    stim = (stim - mean) / scale
    return rolling_window(stim, history, time_axis=0)
def inner(x, y):
    """Full inner product: flatten both arrays and take their dot product."""
    return np.dot(x.ravel(), y.ravel())
def rolling_window(array, window, time_axis=0):
    """
    Make an ndarray with a rolling window of the last dimension

    Parameters
    ----------
    array : array_like
        Array to add rolling window to
    window : int
        Size of rolling window
    time_axis : int, optional
        Axis that holds time: 0 (default, first) or -1 (last).

    Returns
    -------
    Array that is a view of the original array with a added dimension
    of size w.

    Examples
    -------
    >>> x=np.arange(10).reshape((2,5))
    >>> rolling_window(x, 3)
    array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],
           [[5, 6, 7], [6, 7, 8], [7, 8, 9]]])

    Calculate rolling mean of last dimension:

    >>> np.mean(rolling_window(x, 3),1)
    array([[ 1.,  2.,  3.],
           [ 6.,  7.,  8.]])

    NOTE(review): the examples above appear to assume time_axis=-1; with
    the default time_axis=0 a (2, 5) input is transposed to (5, 2) and the
    `window < array.shape[-1]` assert would fail — confirm intended usage.
    Also note the output has t - window windows rather than the usual
    t - window + 1; presumably deliberate (alignment with a response
    vector shifted by one step) — confirm against callers.
    """
    if time_axis == 0:
        # Work over the last axis internally; transpose so time is last.
        array = array.T
    elif time_axis == -1:
        pass
    else:
        raise ValueError('Time axis must be first or last')

    assert window >= 1, "`window` must be at least 1."
    assert window < array.shape[-1], "`window` is too long."

    # with strides
    # Build a strided view (no copy): append an axis of length `window`
    # that reuses the original time stride.
    shape = array.shape[:-1] + (array.shape[-1] - window, window)
    strides = array.strides + (array.strides[-1],)
    arr = np.lib.stride_tricks.as_strided(array, shape=shape, strides=strides)

    if time_axis == 0:
        # Undo the transpose so time is the first axis again.
        return np.rollaxis(arr.T, 1, 0)
    else:
        return arr
| nirum/limo | limo/utils.py | Python | mit | 2,304 |
from setuptools import setup
# Dynamically calculate the version based on pyperclip.__version__.
version = __import__('pyperclip').__version__

setup(
    name='pyperclip',
    version=version,
    url='https://github.com/asweigart/pyperclip',
    author='Al Sweigart',
    author_email='al@inventwithpython.com',
    # BUG FIX: the description and keywords below were copied from
    # PyAutoGUI ("GUI automation ... keyboard and mouse"); pyperclip is a
    # clipboard module, so describe it as such.
    description='A cross-platform clipboard module for Python. (Only handles plain text for now.)',
    license='BSD',
    packages=['pyperclip'],
    test_suite='tests',
    keywords='clipboard copy paste clip xsel xclip',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Win32 (MS Windows)',
        'Environment :: X11 Applications',
        'Environment :: MacOS X',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
)
# Natural Language Toolkit: Word Finder
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# Simplified from PHP version by Robert Klein <brathna@gmail.com>
# http://fswordfinder.sourceforge.net/
from __future__ import print_function
import random
from string import strip
# reverse a word with probability 0.5
# Reverse a word with probability 0.5, otherwise leave it unchanged.
def revword(word):
    flip = random.randint(1, 2)
    return word[::-1] if flip == 1 else word
# try to insert word at position x,y; direction encoded in xf,yf
# Try to write `word` into `grid` along the path (xf(i), yf(i)).
# Succeeds only if every target cell is empty or already holds the same
# letter; the grid is modified only on success.
def step(word, x, xf, y, yf, grid):
    cells = [(xf(i), yf(i)) for i in range(len(word))]
    for (r, c), letter in zip(cells, word):
        if grid[r][c] not in ("", letter):
            return False
    for (r, c), letter in zip(cells, word):
        grid[r][c] = letter
    return True
# try to insert word at position x,y, in direction dir
# Try to place `word` starting at (x, y) heading in direction `dir`
# (1: up-left diagonal, 2: up, 3: up-right diagonal, 4: left).
# Returns False when the word would leave the grid, otherwise whatever
# step() reports; directions outside 1-4 fall through (returning None),
# matching the original behavior.
def check(word, dir, x, y, grid, rows, cols):
    n = len(word)
    if dir == 1:
        if x - n < 0 or y - n < 0:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y - i, grid)
    if dir == 2:
        if x - n < 0:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y, grid)
    if dir == 3:
        if x - n < 0 or y + (n - 1) >= cols:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y + i, grid)
    if dir == 4:
        if y - n < 0:
            return False
        return step(word, x, lambda i: x, y, lambda i: y - i, grid)
def wordfinder(words, rows=20, cols=20, attempts=50,
               alph='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """
    Attempt to arrange words into a letter-grid with the specified
    number of rows and columns.  Try each word in several positions
    and directions, until it can be fitted into the grid, or the
    maximum number of allowable attempts is exceeded.  Returns a tuple
    consisting of the grid and the words that were successfully
    placed.

    :param words: the list of words to be put into the grid
    :type words: list
    :param rows: the number of rows in the grid
    :type rows: int
    :param cols: the number of columns in the grid
    :type cols: int
    :param attempts: the number of times to attempt placing a word
    :type attempts: int
    :param alph: the alphabet, to be used for filling blank cells
    :type alph: list
    :rtype: tuple
    """
    # place longer words first
    # BUG FIX: list.sort(cmp=...) and string.strip(word) are Python-2-only;
    # key=len/str.strip behave identically and also work on Python 3
    # (this makes the module-level `from string import strip` unnecessary).
    words.sort(key=len, reverse=True)

    grid = []  # the letter grid
    used = []  # the words we used

    # initialize the grid
    for i in range(rows):
        grid.append([""] * cols)

    # try to place each word
    for word in words:
        word = word.strip().upper()  # normalize
        save = word  # keep a record of the word
        word = revword(word)
        for attempt in range(attempts):
            r = random.randint(0, len(word))
            dir = random.choice([1, 2, 3, 4])
            x = random.randint(0, rows)
            y = random.randint(0, cols)
            # shift the start so the word may extend in either direction
            if dir == 1:
                x += r; y += r
            elif dir == 2:
                x += r
            elif dir == 3:
                x += r; y -= r
            elif dir == 4:
                y += r
            if 0 <= x < rows and 0 <= y < cols:
                if check(word, dir, x, y, grid, rows, cols):
                    # used.append((save, dir, x, y, word))
                    used.append(save)
                    break

    # Fill up the remaining spaces
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] == '':
                grid[i][j] = random.choice(alph)

    return grid, used
def word_finder():
    """Demo: build and print a word-search puzzle from up to 200 random
    dictionary words of length 3-12."""
    from nltk.corpus import words
    wordlist = words.words()
    random.shuffle(wordlist)
    candidates = [w for w in wordlist[:200] if 3 <= len(w) <= 12]
    grid, used = wordfinder(candidates)

    print("Word Finder\n")
    for row in grid:
        # each letter followed by a space, then a newline (original layout)
        print(" ".join(row) + " ")
    print()
    for number, word in enumerate(used, start=1):
        print("%d:" % number, word)
# Run the demo only when executed as a script.
if __name__ == '__main__':
    word_finder()
| abad623/verbalucce | verbalucce/nltk/misc/wordfinder.py | Python | apache-2.0 | 4,113 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-12 15:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: alter the default of ``Throw.event_time``.

    NOTE(review): the default is a timestamp frozen at generation time
    (2016-06-12 15:52 UTC), which suggests the model field used
    ``default=timezone.now()`` (called) instead of passing the callable —
    confirm against the model. Migration files are frozen history, so the
    code itself is left untouched.
    """

    dependencies = [
        ('bb', '0016_auto_20160612_1750'),
    ]

    operations = [
        migrations.AlterField(
            model_name='throw',
            name='event_time',
            field=models.DateTimeField(default=datetime.datetime(2016, 6, 12, 15, 52, 34, 64000, tzinfo=utc)),
        ),
    ]
| yemmitt/bb_website | bb_django/mysite/bb/migrations/0017_auto_20160612_1752.py | Python | mit | 562 |
#!/usr/bin/env python3
'''
Created on 1-June-2016
@author: Asawari.Vaidya
'''
from PythonNetBanxSDK.CardPayments.BillingDetails import BillingDetails
from PythonNetBanxSDK.CustomerVault.BACSBankAccount import BACSBankAccount
from PythonNetBanxSDK.CustomerVault.EFTBankAccount import EFTBankAccount
from PythonNetBanxSDK.CustomerVault.Profile import Profile
from PythonNetBanxSDK.DirectDebit.StandaloneCredits import StandaloneCredits
from PythonNetBanxSDK.OptimalApiClient import OptimalApiClient
from utils.Utils import Utils
from Config import Config
from RandomTokenGenerator import RandomTokenGenerator
# Sample script: submit a standalone EFT (direct-debit) credit through the
# NetBanx/Optimal Payments sandbox API and print the response.

# API client bound to the EFT merchant account in the configured environment.
optimal_obj = OptimalApiClient(Config.api_key, Config.api_password, Config.environment, Config.account_number_EFT)

# Standalone credit request: unique merchant reference, amount in minor
# currency units (presumably cents — confirm with the API docs), and the
# customer's originating IP.
standalone_Obj = StandaloneCredits(None)
standalone_Obj.merchantRefNum(RandomTokenGenerator().generateToken())
standalone_Obj.amount("10098")
standalone_Obj.customerIp("192.0.126.111")

# Canadian EFT bank account that will receive the credit.
eftbank_Obj = EFTBankAccount(None)
eftbank_Obj.accountHolderName("XYZ Company")
eftbank_Obj.accountNumber("335892")
eftbank_Obj.transitNumber("22446")
eftbank_Obj.institutionId("001")

# Customer profile attached to the payment.
profile_Obj = Profile(None)
profile_Obj.firstName("Joe")
profile_Obj.lastName("Smith")
profile_Obj.email("Joe.Smith@hotmail.com")

# Billing address for the account holder.
billingdetails_Obj = BillingDetails(None)
billingdetails_Obj.street("100 Queen Street West")
billingdetails_Obj.city("Ottawa")
billingdetails_Obj.state("ON")
billingdetails_Obj.country("CA")
billingdetails_Obj.zip("90210")
billingdetails_Obj.phone("6139991100")

# Assemble the request and submit it.
standalone_Obj.profile(profile_Obj)
standalone_Obj.billingDetails(billingdetails_Obj)
standalone_Obj.eft(eftbank_Obj)
response_object = optimal_obj.direct_debit_service_handler().submit_standalone(standalone_Obj)

print ("\nResponse Values ==========> ")
Utils.print_response(response_object)
| OptimalPayments/Python_SDK | src/sample_application/DirectDebitEFTStandaloneCreditpurchase.py | Python | mit | 1,785 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# Auto-generated (AutoRest) msrest model; attribute values are populated by
# the service during deserialization.
class SelfHostedIntegrationRuntimeNode(Model):
    """Properties of Self-hosted integration runtime node.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar node_name: Name of the integration runtime node.
    :vartype node_name: str
    :ivar machine_name: Machine name of the integration runtime node.
    :vartype machine_name: str
    :ivar host_service_uri: URI for the host machine of the integration
     runtime.
    :vartype host_service_uri: str
    :ivar status: Status of the integration runtime node. Possible values
     include: 'NeedRegistration', 'Online', 'Limited', 'Offline', 'Upgrading',
     'Initializing', 'InitializeFailed'
    :vartype status: str or
     ~azure.mgmt.datafactory.models.SelfHostedIntegrationRuntimeNodeStatus
    :ivar capabilities: The integration runtime capabilities dictionary
    :vartype capabilities: dict[str, str]
    :ivar version_status: Status of the integration runtime node version.
    :vartype version_status: str
    :ivar version: Version of the integration runtime node.
    :vartype version: str
    :ivar register_time: The time at which the integration runtime node was
     registered in ISO8601 format.
    :vartype register_time: datetime
    :ivar last_connect_time: The most recent time at which the integration
     runtime was connected in ISO8601 format.
    :vartype last_connect_time: datetime
    :ivar expiry_time: The time at which the integration runtime will expire
     in ISO8601 format.
    :vartype expiry_time: datetime
    :ivar last_start_time: The time the node last started up.
    :vartype last_start_time: datetime
    :ivar last_stop_time: The integration runtime node last stop time.
    :vartype last_stop_time: datetime
    :ivar last_update_result: The result of the last integration runtime node
     update. Possible values include: 'Succeed', 'Fail'
    :vartype last_update_result: str or
     ~azure.mgmt.datafactory.models.IntegrationRuntimeUpdateResult
    :ivar last_start_update_time: The last time for the integration runtime
     node update start.
    :vartype last_start_update_time: datetime
    :ivar last_end_update_time: The last time for the integration runtime node
     update end.
    :vartype last_end_update_time: datetime
    :ivar is_active_dispatcher: Indicates whether this node is the active
     dispatcher for integration runtime requests.
    :vartype is_active_dispatcher: bool
    :ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration
     runtime node.
    :vartype concurrent_jobs_limit: int
    :ivar max_concurrent_jobs: The maximum concurrent jobs in this integration
     runtime.
    :vartype max_concurrent_jobs: int
    """

    # Every property is server-populated, hence marked read-only.
    _validation = {
        'node_name': {'readonly': True},
        'machine_name': {'readonly': True},
        'host_service_uri': {'readonly': True},
        'status': {'readonly': True},
        'capabilities': {'readonly': True},
        'version_status': {'readonly': True},
        'version': {'readonly': True},
        'register_time': {'readonly': True},
        'last_connect_time': {'readonly': True},
        'expiry_time': {'readonly': True},
        'last_start_time': {'readonly': True},
        'last_stop_time': {'readonly': True},
        'last_update_result': {'readonly': True},
        'last_start_update_time': {'readonly': True},
        'last_end_update_time': {'readonly': True},
        'is_active_dispatcher': {'readonly': True},
        'concurrent_jobs_limit': {'readonly': True},
        'max_concurrent_jobs': {'readonly': True},
    }

    # Maps Python attribute names to REST (camelCase) field names and
    # msrest serialization types.
    _attribute_map = {
        'node_name': {'key': 'nodeName', 'type': 'str'},
        'machine_name': {'key': 'machineName', 'type': 'str'},
        'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'capabilities': {'key': 'capabilities', 'type': '{str}'},
        'version_status': {'key': 'versionStatus', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'register_time': {'key': 'registerTime', 'type': 'iso-8601'},
        'last_connect_time': {'key': 'lastConnectTime', 'type': 'iso-8601'},
        'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
        'last_start_time': {'key': 'lastStartTime', 'type': 'iso-8601'},
        'last_stop_time': {'key': 'lastStopTime', 'type': 'iso-8601'},
        'last_update_result': {'key': 'lastUpdateResult', 'type': 'str'},
        'last_start_update_time': {'key': 'lastStartUpdateTime', 'type': 'iso-8601'},
        'last_end_update_time': {'key': 'lastEndUpdateTime', 'type': 'iso-8601'},
        'is_active_dispatcher': {'key': 'isActiveDispatcher', 'type': 'bool'},
        'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
        'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
    }

    def __init__(self):
        """Initialize every (read-only) property to None; values are filled
        in during deserialization.

        NOTE(review): unlike most generated models, this __init__ does not
        call super().__init__() — confirm the msrest Model base requires no
        initialization in this SDK version.
        """
        self.node_name = None
        self.machine_name = None
        self.host_service_uri = None
        self.status = None
        self.capabilities = None
        self.version_status = None
        self.version = None
        self.register_time = None
        self.last_connect_time = None
        self.expiry_time = None
        self.last_start_time = None
        self.last_stop_time = None
        self.last_update_result = None
        self.last_start_update_time = None
        self.last_end_update_time = None
        self.is_active_dispatcher = None
        self.concurrent_jobs_limit = None
        self.max_concurrent_jobs = None
| AutorestCI/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/self_hosted_integration_runtime_node.py | Python | mit | 6,083 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 21:55:06 2018
@author: leandrodemarcovedelago
"""
import numpy as np
import numpy.matlib as npmatlib
import math
import Utils
class Acor:
    """Ant Colony Optimization for continuous domains (ACOR) applied to
    sizing the components of an analog filter.

    Four algorithm variants are supported (see ``__init__``); components
    can be optimized directly or via the logarithm of their values.
    """
    def __init__(self, alg_variant, uses_log):
        """
        * alg_variant should be one of the following strings:
            'ContinuoLibre', 'ContinuoFijo', 'Vecinos', 'DiscretoPuro'
        * uses_log: boolean indicating whether or not to use logarithm
        of components instead of their regular value
        """
        self.alg_variant = alg_variant
        self.uses_log = uses_log
        self.utils = Utils.Utils()
        # 'ContinuoLibre' also optimizes R1, so it has one extra
        # dimension and one extra resistor; other variants receive R1
        # as a fixed parameter of main_loop().
        self.num_dimensions = 5 if alg_variant == 'ContinuoLibre' else 4
        self.num_resistors = 3 if alg_variant == 'ContinuoLibre' else 2
    def _calc_comp_val_discrete(self, means, sigmas, comp_idx, p):
        """
        This function is used to discretize the value of a filter component
        from a continuous calculated value by ACOR when using the
        variant 'DiscretoPuro'
        * means: array of means
        * sigmas: array of standard deviations
        * comp_idx: index of the component to discretize
        * p: probabilities array
        """
        i = comp_idx
        res_vals, cap_vals = self.utils.res_vals, self.utils.cap_vals
        log_res_vals = self.utils.log_res_vals
        log_cap_vals = self.utils.log_cap_vals
        # Select Gaussian Kernel
        l = Utils.wheel_selection(p)
        # Generate Gaussian Random Variable
        aux = means[l][i] + sigmas[l][i] * np.random.randn()
        # The first num_resistors dimensions are resistors, the rest are
        # capacitors; choose the matching table of commercial values.
        is_resistor = i < self.num_resistors
        if (is_resistor and not self.uses_log):
            vals_to_use = res_vals
        elif (is_resistor and self.uses_log):
            vals_to_use = log_res_vals
        elif (not is_resistor and not self.uses_log):
            vals_to_use = cap_vals
        else:
            vals_to_use = log_cap_vals
        # Snap the sampled value to the closest commercial value.
        idx = np.abs(vals_to_use - aux).argmin()
        return vals_to_use[idx]
    def _initialize_archive(self, R1):
        """Build the initial solution archive with uniformly random ants.

        Each archive row holds num_dimensions component values followed
        by the cost of that solution. R1 is forwarded to the cost
        function (used by the variants with a fixed first resistor).
        """
        res_min, res_max = self.utils.res_min, self.utils.res_max
        cap_min, cap_max = self.utils.cap_min, self.utils.cap_max
        num_dim = self.num_dimensions
        archive_size = self.utils.archive_size
        cost = self.utils.cost
        empty_ant = np.empty([num_dim + 1])
        archive = npmatlib.repmat(empty_ant, archive_size, 1)
        for i in range(0, archive_size):
            for j in range(0, num_dim + 1):
                if (j < self.num_resistors):
                    # Resistor
                    low = math.log(res_min) if self.uses_log else res_min
                    high = math.log(res_max) if self.uses_log else res_max
                    archive[i][j] = np.random.uniform(low, high)
                elif (j < num_dim):
                    # Capacitor
                    low = math.log(cap_min) if self.uses_log else cap_min
                    high = math.log(cap_max) if self.uses_log else cap_max
                    archive[i][j] = np.random.uniform(low, high)
                else:
                    # Cost
                    archive[i][j] = cost(archive[i][0:num_dim], self.uses_log,
                                         R1)
        return archive
    def main_loop(self, R1 = None):
        """Run the ACOR optimization and return the best ant found.

        * R1: fixed value of the first resistor for the variants that do
          not optimize it; None for 'ContinuoLibre'.
        Returns the best archive row: component values followed by cost.
        Also records the per-iteration evolution in self.best_* arrays.
        """
        archive_size = self.utils.archive_size
        num_dim = self.num_dimensions
        max_iterations = self.utils.max_iterations
        int_factor = self.utils.intensification_factor
        zeta = self.utils.zeta
        sample_size = self.utils.sample_size
        cost = self.utils.cost
        use_log = self.uses_log
        # Hold data of evolution for cost and variables through execution
        self.best_cost = np.zeros([max_iterations])
        self.best_r1 = np.zeros([max_iterations])
        self.best_r2 = np.zeros([max_iterations])
        self.best_r3 = np.zeros([max_iterations])
        self.best_c4 = np.zeros([max_iterations])
        self.best_c5 = np.zeros([max_iterations])
        archive = self._initialize_archive(R1)
        # Keep the archive sorted by ascending cost (last column).
        archive = archive[archive[:,num_dim].argsort()]
        # Weights array: Gaussian weighting over the rank l of each
        # solution (standard ACOR kernel weights).
        w = np.empty([archive_size])
        for l in range(0, archive_size):
            f_factor = 1/(math.sqrt(2*math.pi)*int_factor*archive_size)
            s_factor = math.exp(-0.5*(l/(int_factor*archive_size))**2)
            w[l] = f_factor * s_factor
        # Selection probabilities
        p = w / np.sum(w)
        # ACOR Main Loop
        empty_ant = np.empty([num_dim + 1])
        for it in range(0, max_iterations):
            # Means
            s = np.zeros([archive_size, num_dim])
            for l in range(0, archive_size):
                s[l] = archive[l][0:num_dim]
            # Standard deviations: mean absolute distance to the other
            # archive members, scaled by zeta.
            sigma = np.zeros([archive_size, num_dim])
            for l in range(0, archive_size):
                D = 0
                for r in range(0, archive_size):
                    D += abs(s[l]-s[r])
                sigma[l] = zeta * D / (archive_size - 1)
            # Create new population array
            # (np.matlib is available because numpy.matlib is imported
            # above as npmatlib, which loads the submodule).
            new_population = np.matlib.repmat(empty_ant, sample_size, 1)
            # Initialize solution for each new ant
            for t in range(0, sample_size):
                new_population[t][0:num_dim] = np.zeros([num_dim])
                for i in range(0, num_dim):
                    if (self.alg_variant == 'DiscretoPuro'):
                        comp_val = self._calc_comp_val_discrete(s, sigma, i, p)
                        new_population[t][i] = comp_val
                    else:
                        # Select Gaussian Kernel
                        l = Utils.wheel_selection(p)
                        # Generate Gaussian Random Variable
                        new_population[t][i] = (s[l][i]
                                                + sigma[l][i]*np.random.randn())
                # Evaluation of built solution
                filter_comps = new_population[t][0:num_dim]
                new_population[t][num_dim] = cost(filter_comps, use_log, R1)
            # Merge old population (archive) with new one
            merged_pop = np.concatenate([archive, new_population])
            # And sort it again
            merged_pop = merged_pop[merged_pop[:,num_dim].argsort()]
            # Store the bests in the archive and update best sol
            archive = merged_pop[:archive_size]
            best_sol = archive[0][0:num_dim] # Current best solution, NO cost
            self.best_cost[it] = archive[0][num_dim] # Current best cost
            # When R1 is fixed, best_sol holds (R2, R3, C4, C5); otherwise
            # it holds (R1, R2, R3, C4, C5).
            self.best_r1[it] = R1 if R1 != None else best_sol[0]
            if self.uses_log and R1 != None:
                self.best_r1[it] = math.log(R1)
            self.best_r2[it] = best_sol[0] if R1 != None else best_sol[1]
            self.best_r3[it] = best_sol[1] if R1 != None else best_sol[2]
            self.best_c4[it] = best_sol[2] if R1 != None else best_sol[3]
            self.best_c5[it] = best_sol[3] if R1 != None else best_sol[4]
        return archive[0] # Best population and cost
| leandrodemarcovedelago/thesis-aco | informe/ACOR.py | Python | gpl-3.0 | 7,338 |
#-*- coding:utf-8 -*-
import sys
import struct
import array, io, fcntl
import smbus
from gevent import sleep
from gsensors import AutoUpdateValue, DataSource
class I2CRaw(object):
    """Raw read/write access to an I2C slave through the /dev/i2c-N
    character device, bypassing smbus command framing."""

    # ioctl request code that binds a file descriptor to a slave address.
    _I2C_SLAVE_IOCTL = 0x0703

    def __init__(self, device, bus):
        dev_path = "/dev/i2c-" + str(bus)
        self.fr = io.open(dev_path, "rb", buffering=0)
        self.fw = io.open(dev_path, "wb", buffering=0)
        # Point both file handles at the requested slave address.
        fcntl.ioctl(self.fr, self._I2C_SLAVE_IOCTL, device)
        fcntl.ioctl(self.fw, self._I2C_SLAVE_IOCTL, device)

    def write(self, bytes):
        self.fw.write(bytes)

    def read(self, bytes):
        return self.fr.read(bytes)

    def close(self):
        self.fw.close()
        self.fr.close()
class I2CBus():
    """smbus-based helper to read typed values from an I2C slave.

    Multi-byte values are decoded little-endian, matching the byte order
    of the ARM/AVR slaves this code targets.
    """
    def __init__(self, i2c_slave, i2c_bus=0):
        self._bus = smbus.SMBus(i2c_bus)
        self._i2c_slave = i2c_slave
        #TODO: check i2c device exist
    def read_block(self, cmd, nb_bytes):
        """Read nb_bytes raw bytes (as a list of ints) for command cmd."""
        bytes = self._bus.read_i2c_block_data(self._i2c_slave, cmd, nb_bytes)
        return bytes
    def read_byte(self, cmd):
        """Read a single byte for command cmd."""
        result = self._bus.read_byte_data(self._i2c_slave, cmd)
        return result
    def read_cast(self, cmd, nb_bytes=4, cast="f"):
        """Read nb_bytes and unpack them with the given struct format.

        to see all possible cast:
        http://docs.python.org/2/library/struct.html#format-characters
        """
        byte_list = self.read_block(cmd, nb_bytes=nb_bytes)
        # bytes(bytearray(...)) yields str on Python 2 and bytes on
        # Python 3; the former "".join(map(chr, ...)) produced a unicode
        # str that struct.unpack rejects on Python 3.
        return struct.unpack(cast, bytes(bytearray(byte_list)))[0]
    def read_float(self, cmd):
        return self.read_cast(cmd, 4, "f")
    def read_long(self, cmd):
        # "<l" forces a standard 4-byte little-endian signed long; the
        # bare native "l" is 8 bytes on 64-bit platforms, which makes
        # unpack reject the 4-byte payload.
        return self.read_cast(cmd, 4, "<l")
    def read_unsigned_long(self, cmd):
        # BUG FIX: this used the signed "l" format, returning negative
        # values for readings with the high bit set; "<L" is the
        # 4-byte unsigned equivalent.
        return self.read_cast(cmd, 4, "<L")
class BMP085(AutoUpdateValue):
    """ interface for pressure measurement with an I2C BMP085 sensor
    depend on adafruit code:
    https://github.com/adafruit/Adafruit_Python_BMP
    """
    update_freq = 30
    def __init__(self, name=None):
        AutoUpdateValue.__init__(self, name=name)
        # Imported lazily so the module can load on machines without the
        # Adafruit library installed.
        from Adafruit_BMP import BMP085
        self.bmp = BMP085.BMP085()
        # Child DataSource objects, created lazily on first access.
        self._temp = None
        self._pressure = None
    @property
    def temp(self):
        # Temperature DataSource (°C), created on first access.
        if self._temp is None:
            name = "%s.temp" % self.__class__.__name__
            self._temp = DataSource(name=name, unit="°C", timeout=None)
        return self._temp
    @property
    def pressure(self):
        # Pressure DataSource, created on first access.
        if self._pressure is None:
            # BUG FIX: was "%s.temp", which gave the pressure source the
            # same name as the temperature source.
            name = "%s.pressure" % self.__class__.__name__
            self._pressure = DataSource(name=name, unit="hPa", timeout=None)
        return self._pressure
    def update(self):
        # Only poll the sensor for the sources a client has requested.
        if self._temp:
            self._temp.value = self.bmp.read_temperature()
        if self._pressure:
            # NOTE(review): Adafruit's read_pressure() typically reports
            # Pa while the unit declared above is hPa -- confirm scaling.
            self._pressure.value = self.bmp.read_pressure()
class LightBH1750(AutoUpdateValue):
    """ interface for an I2C BH1750 light sensor
    """
    update_freq = 30
    unit = "lx"
    def __init__(self, i2c_bus=1, name=None):
        AutoUpdateValue.__init__(self, name=name)
        # Default I2C address of the BH1750 (ADDR pin pulled low).
        self.device_addr = 0x23
        self._bus = I2CBus(i2c_slave=self.device_addr, i2c_bus=i2c_bus)
    def update(self):
        # Command 0x21: one-shot high-resolution measurement. The raw
        # big-endian 16-bit count divided by 1.2 yields lux.
        raw_lux = self._bus.read_cast(0x21, 2, ">H") / 1.2
        self.set_value(raw_lux)
class HTU21D(AutoUpdateValue):
    """Interface for an I2C HTU21D temperature/humidity sensor.

    Uses raw /dev/i2c access (I2CRaw) because the HTU21D does not follow
    the smbus command protocol.
    """
    #cf https://www.raspberrypi.org/forums/viewtopic.php?f=32&t=84966
    update_freq = 30
    # HTU21D Address
    device_addr = 0x40
    # Commands: raw command bytes written to the device. "NOHOLD"
    # variants release the bus while the measurement is in progress.
    # NOTE(review): these are str literals (Python 2 bytes); under
    # Python 3 they would need to be bytes literals.
    CMD_READ_TEMP_HOLD = "\xE3"
    CMD_READ_HUM_HOLD = "\xE5"
    CMD_READ_TEMP_NOHOLD = "\xF3"
    CMD_READ_HUM_NOHOLD = "\xF5"
    CMD_WRITE_USER_REG = "\xE6"
    CMD_READ_USER_REG = "\xE7"
    CMD_SOFT_RESET= "\xFE"
    def __init__(self, i2c_bus=1, name=None):
        AutoUpdateValue.__init__(self, name=name)
        self.dev = I2CRaw(device=self.device_addr, bus=i2c_bus)
        # Child DataSource objects, created lazily on first access.
        self._temp = None
        self._hum = None
        self.dev.write(self.CMD_SOFT_RESET) #soft reset
        sleep(.1)
    def update(self):
        """Refresh only the sources a client has requested."""
        if self._temp:
            self._temp.value = self.read_temp()
        if self._hum:
            if self._temp:
                # Give the sensor time to recover between measurements.
                sleep(.2)
            self._hum.value = self.read_hum()
    @property
    def temp(self):
        # Temperature DataSource (°C), created on first access.
        if self._temp is None:
            name = "%s.temp" % self.__class__.__name__
            self._temp = DataSource(name=name, unit="°C", timeout=None)
        return self._temp
    @property
    def hum(self):
        # Relative humidity DataSource (%), created on first access.
        if self._hum is None:
            name = "%s.hum" % self.__class__.__name__
            self._hum = DataSource(name=name, unit="%", timeout=None)
        return self._hum
    def ctemp(self, sensorTemp):
        """Convert a raw 16-bit temperature reading to °C (datasheet
        formula)."""
        tSensorTemp = sensorTemp / 65536.0
        return -46.85 + (175.72 * tSensorTemp)
    def chumid(self, sensorHumid):
        """Convert a raw 16-bit humidity reading to %RH (datasheet
        formula)."""
        tSensorHumid = sensorHumid / 65536.0
        return -6.0 + (125.0 * tSensorHumid)
    def crc8check(self, value):
        """Return True when the 3-byte reading (MSB, LSB, CRC) passes
        the sensor's CRC-8 check."""
        # Ported from Sparkfun Arduino HTU21D Library: https://github.com/sparkfun/HTU21D_Breakout
        remainder = ( ( value[0] << 8 ) + value[1] ) << 8
        remainder |= value[2]
        # POLYNOMIAL = 0x0131 = x^8 + x^5 + x^4 + 1
        # divsor = 0x988000 is the 0x0131 polynomial shifted to farthest left of three bytes
        divsor = 0x988000
        for i in range(0, 16):
            if( remainder & 1 << (23 - i) ):
                remainder ^= divsor
            divsor = divsor >> 1
        return remainder == 0
    def read_temp(self):
        """Trigger a temperature measurement and return it in °C.

        Raises ValueError when the CRC check fails.
        """
        self.dev.write(self.CMD_READ_TEMP_NOHOLD) #measure temp
        sleep(.1)
        data = self.dev.read(3)
        buf = array.array('B', data)
        if not self.crc8check(buf):
            raise ValueError("Invalid reading")
        # Mask out the two status bits in the LSB before converting.
        temp = (buf[0] << 8 | buf [1]) & 0xFFFC
        return self.ctemp(temp)
    def read_hum(self):
        """Trigger a humidity measurement and return it in %RH.

        Raises ValueError when the CRC check fails.
        """
        self.dev.write(self.CMD_READ_HUM_NOHOLD) #measure humidity
        sleep(.1)
        data = self.dev.read(3)
        buf = array.array('B', data)
        if not self.crc8check(buf):
            raise ValueError("Invalid reading")
        # Mask out the two status bits in the LSB before converting.
        humid = (buf[0] << 8 | buf [1]) & 0xFFFC
        return self.chumid(humid)
| enavarro222/gsensors | gsensors/i2c_devices.py | Python | agpl-3.0 | 6,065 |
import keyedcache
import logging
log = logging.getLogger(__name__)
class CachedObjectMixin(object):
    """Mixin adding simple keyedcache-backed caching to any object.

    Cache keys are derived from the class name plus the instance itself,
    so the class name of the object should be unambiguous.
    """
    def cache_delete(self, *args, **kwargs):
        cache_key = self.cache_key(*args, **kwargs)
        log.debug("clearing cache for %s", cache_key)
        keyedcache.cache_delete(cache_key, children=True)
    def cache_get(self, *args, **kwargs):
        return keyedcache.cache_get(self.cache_key(*args, **kwargs))
    def cache_key(self, *args, **kwargs):
        key_parts = [self.__class__.__name__, self]
        key_parts.extend(args)
        return keyedcache.cache_key(key_parts, **kwargs)
    def cache_reset(self):
        # Drop any stale entry (including children) before re-caching.
        self.cache_delete()
        self.cache_set()
    def cache_set(self, *args, **kwargs):
        value = kwargs.pop('value', self)
        keyedcache.cache_set(self.cache_key(*args, **kwargs), value=value)
    def is_cached(self, *args, **kwargs):
        return keyedcache.is_cached(self.cache_key(*args, **kwargs))
# Unused functions find_by_id, find_by_key, find_by_slug are coming from
# Satchmo but are currently unused also there.
def find_by_id(cls, groupkey, objectid, raises=False):
    """A helper function to look up an object by id.

    Tries the cache first and falls back to a database query, caching the
    result. Returns None on a miss unless ``raises`` is set, in which
    case the original ``cls.DoesNotExist`` is re-raised.
    """
    ob = None
    try:
        ob = keyedcache.cache_get(groupkey, objectid)
    except keyedcache.NotCachedError as e:
        try:
            ob = cls.objects.get(pk=objectid)
            keyedcache.cache_set(e.key, value=ob)
        except cls.DoesNotExist:
            log.debug("No such %s: %s", groupkey, objectid)
            if raises:
                # Re-raise the caught exception (keeping its traceback)
                # instead of constructing a bare new one -- consistent
                # with find_by_key and find_by_slug below.
                raise
    return ob
def find_by_key(cls, groupkey, key, raises=False):
    """Look up a model instance by its ``key`` field, via the cache.

    On a cache miss the database is queried and the result cached under
    the missed key. Returns None when nothing matches, unless ``raises``
    is true, in which case the DoesNotExist propagates.
    """
    found = None
    try:
        found = keyedcache.cache_get(groupkey, key)
    except keyedcache.NotCachedError as miss:
        try:
            found = cls.objects.get(key__exact=key)
            keyedcache.cache_set(miss.key, value=found)
        except cls.DoesNotExist:
            log.debug("No such %s: %s", groupkey, key)
            if raises:
                raise
    return found
def find_by_slug(cls, groupkey, slug, raises=False):
    """Look up a model instance by its ``slug`` field, via the cache.

    On a cache miss the database is queried and the result cached under
    the missed key. Returns None when nothing matches, unless ``raises``
    is true, in which case the DoesNotExist propagates.
    """
    found = None
    try:
        found = keyedcache.cache_get(groupkey, slug)
    except keyedcache.NotCachedError as miss:
        try:
            found = cls.objects.get(slug__exact=slug)
            keyedcache.cache_set(miss.key, value=found)
        except cls.DoesNotExist:
            log.debug("No such %s: %s", groupkey, slug)
            if raises:
                raise
    return found
| aronysidoro/django-livesettings | live/keyedcache/models.py | Python | bsd-3-clause | 2,736 |
#!/usr/bin/env python
# Plot blue-red-purple election results for the 2016 presidential election.
# Expects the following data files:
# data/state-shapes/st99_d00* from mpl basemap examples directory
# (/usr/share/doc/python-mpltoolkits.basemap-doc/examples/ on debian
# or https://github.com/matplotlib/basemap/blob/master/examples/ )
# data//Deleetdk/counties-opendatasoft-2016.csv from
# https://public.opendatasoft.com/explore/dataset/usa-2016-presidential-election-by-county/export/
# (full URL:
# https://public.opendatasoft.com/explore/dataset/usa-2016-presidential-election-by-county/download/?format=csv&timezone=America/Denver&use_labels_for_header=true )
# Copyright Akkana Peck under the GPLv2 or later, share and enjoy.
# Good tutorial that explains the args (unlike the Basemap doc):
# http://www.datadependence.com/2016/06/creating-map-visualisations-in-python/
import sys
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# We need CSV to parse the county data:
import csv
# but then the county shapes are encoded inside the CSV as JSON, so
import simplejson as json
def map_from_bounding_box(bbox):
    '''Create a transverse-Mercator Basemap covering the given bounding
    box (a dict with lllon, lllat, urlon, urlat keys) and draw the US
    state outlines on it.
    '''
    center_lon = float(bbox['lllon'] + bbox['urlon']) / 2.0
    center_lat = float(bbox['lllat'] + bbox['urlat']) / 2.0
    usmap = Basemap(resolution='i',  # crude, low, intermediate, high, full
                    llcrnrlon=bbox['lllon'], urcrnrlon=bbox['urlon'],
                    lon_0=center_lon,
                    llcrnrlat=bbox['lllat'], urcrnrlat=bbox['urlat'],
                    lat_0=center_lat,
                    projection='tmerc')
    # State boundary shapes from the U.S. Census Bureau
    # (http://www.census.gov/geo/www/cob/st2000.html). Basemap's builtin
    # drawstates() is not used because it gives no way to associate the
    # shapes with state names.
    #
    # readshapefile() is undocumented; after this call usmap gains:
    #   usmap.states_info -- list of dicts with NAME, AREA, PERIMETER
    #                        (plus several arcane members)
    #   usmap.states      -- parallel list of lists of projected (x, y)
    #                        coordinate tuples
    # To work with a state by name, find its index in states_info and use
    # the same index into states.
    usmap.readshapefile('data/state-shapes/st99_d00', 'states',
                        drawbounds=True)
    return usmap
def map_county_data(m):
    '''Color every US county on the map according to its 2016
    presidential vote share: blue for Clinton, red for Trump.
    Expects the opendatasoft county-level CSV (semicolon-separated, with
    the county outline embedded as GeoJSON in the "Geo Shape" column).
    '''
    # fp = open("data/alamos.csv")
    # The embedded GeoJSON shapes are far larger than csv's default
    # field size limit.
    csv.field_size_limit(sys.maxsize)
    fp = open("data/Deleetdk/counties-opendatasoft-2016.csv")
    reader = csv.DictReader(fp, delimiter=';')
    ax = plt.gca() # get current axes instance
    for county in reader:
        # print county["County"], county["State"]
        try:
            countyshape = json.loads(county["Geo Shape"])
            shapecoords = countyshape["coordinates"]
        except Exception, e:
            # Some fields don't have GeoShape in this file, like in Alaska.
            print county["County"], county["State"], \
                ": No GeoShape. Or something:", e
            continue
        # What color is this county?
        # NOTE(review): "Clinton H" / "Trump D" are taken here as the
        # Democratic and Republican vote counts -- confirm against the
        # dataset's actual column semantics.
        dem = float(county["Clinton H"])
        rep = float(county["Trump D"])
        # pop = float(county["Total.Population"])
        pop = float(county["votes"])
        # print county["County"], ":", dem, rep, pop, county["votes"]
        blue = dem/pop
        red = rep/pop
        color = (red, 0, blue)
        # Some counties have a list of lists of coordinate pairs,
        # a few big ones have a list of lists of lists of pairs.
        # So we need to factor the handling of the final list:
        def handle_subregion(subregion):
            # Project lon/lat into map coordinates in place, then draw
            # the filled polygon on the current axes.
            for coord_pair in subregion:
                coord_pair[0], coord_pair[1] = m(coord_pair[0], coord_pair[1])
            poly = Polygon(subregion, facecolor=color, edgecolor=color)
            ax.add_patch(poly)
        if countyshape["type"] == "Polygon":
            for subregion in shapecoords:
                handle_subregion(subregion)
        elif countyshape["type"] == "MultiPolygon":
            for subregion in shapecoords:
                for sub1 in subregion:
                    handle_subregion(sub1)
        else:
            print "Skipping", county["County"], \
                "because of unknown type", countyshape["type"]
def init_map():
    '''Draw a map of the US, upon which we can graph county results.'''
    # Bounding box of the continental US, in degrees.
    return map_from_bounding_box({'lllon': -119, 'urlon': -64,
                                  'lllat': 22.0, 'urlat': 50.})
def show_map():
    '''Trim the figure margins, add a title, and display the map.'''
    # tight_layout removes most of the extra horizontal whitespace and
    # about half of the vertical.
    plt.tight_layout(pad=0, w_pad=0, h_pad=0)
    plt.title('The Map')
    plt.show()
    # Alternative: save to a file instead of displaying.
    # plt.savefig('test.png')
if __name__ == "__main__":
    # Build the base map, color each county, then display the result.
    m = init_map()
    map_county_data(m)
    show_map()
| akkana/scripts | mapping/election2016/bluered-opendatasoft.py | Python | gpl-2.0 | 5,561 |
import numpy as np
import pandas as pd
from lsst.sims.catUtils.supernovae import SNObject
from opsimsummary import summarize_opsim as oss
from astropy.table import Table
__all__ = ['SNObs']
class SNObs(oss.SummaryOpsim):
    """Combine an OpSim observation summary with an SNObject model to
    build simulated supernova light curves for LSST cadences.
    """
    def __init__(self, t0, fieldID=None, raCol=None, decCol=None, ra=0.,
                 dec=0., peakabsmagBessellB=-19.3,
                 summarydf=None, snState=None, lsst_bp=None):
        """
        Parameters
        ----------
        t0 : float
            Peak MJD of the supernova.
        fieldID : int, optional
            OpSim field from which to take pointings and coordinates.
        raCol, decCol : str, optional
            Column names in summarydf holding the coordinates.
        ra, dec : float, optional
            Coordinates in degrees, used when neither field nor columns
            are given.
        peakabsmagBessellB : float, optional
            Peak absolute magnitude in Bessell B (AB system).
        summarydf : pandas.DataFrame, optional
            OpSim summary table of observations.
        snState : dict, optional
            SNObject model parameters; defaults to {'z': 0.5}.
        lsst_bp : dict, optional
            Mapping of band name to LSST bandpass object.
        """
        oss.SummaryOpsim.__init__(self, summarydf=summarydf)
        # BUG FIX: the default used to be the shared mutable literal
        # {'z': 0.5}; a fresh dict is now created per instance.
        if snState is None:
            snState = {'z': 0.5}
        self.fieldID = fieldID
        self.raCol = raCol
        self.decCol = decCol
        self._ra = np.radians(ra)
        self._dec = np.radians(dec)
        self.summary = summarydf
        self._peakabsmagBessellB = peakabsmagBessellB
        self.t0 = t0
        self._lc = None
        self._numDropped = None
        self._snState = snState
        self.lsst_bp = lsst_bp
        # Default time window around t0 (days). NOTE(review): the
        # lightcurve property below uses its own identical defaults and
        # never reads these attributes.
        self.lowrange = -30.
        self.highrange = 50.
    @property
    def radeg(self):
        """RA in degrees: explicit coordinates take precedence, then the
        OpSim field, then the summary column."""
        if self._ra != 0. and self._dec != 0.:
            return np.degrees(self._ra)
        if self.fieldID is not None:
            ra = self.ra(self.fieldID)
        elif self.raCol is not None:
            ra = self.summary[self.raCol].iloc[0]
        else:
            ra = self._ra
        return np.degrees(ra)
    @property
    def decdeg(self):
        """Dec in degrees, with the same precedence as radeg."""
        # BUG FIX: the guard used to test self._dec twice; it now mirrors
        # radeg and checks both coordinates.
        if self._ra != 0. and self._dec != 0.:
            return np.degrees(self._dec)
        if self.fieldID is not None:
            dec = self.dec(self.fieldID)
        elif self.decCol is not None:
            dec = self.summary[self.decCol].iloc[0]
        else:
            dec = self._dec
        return np.degrees(dec)
    @property
    def snState(self):
        # Prefer the fully-realized state of the SNObject once built.
        if self.SN.SNstate is None:
            SNstate = self._snState
        else:
            SNstate = self.SN.SNstate
        return SNstate
    @snState.setter
    def snState(self, value):
        self._snState = value
        return self._snState
    @property
    def SN(self):
        """
        `lsst.sims.catsim.SNObject` instance with peakMJD set to t0
        """
        #if self.snState is not None:
        #    return SNObject.fromSNState(self.snState)
        sn = SNObject(ra=self.radeg, dec=self.decdeg)
        sn.set(t0=self.t0)
        sn.set(**self._snState)
        sn.set_source_peakabsmag(self._peakabsmagBessellB, 'bessellB', 'ab')
        return sn
    def SNCosmoLC(self, scattered=False, seed=0):
        """Return the light curve as an astropy Table, optionally with
        Gaussian scatter added to the fluxes (deterministic per seed)."""
        lc = self.lightcurve
        lc['modelFlux'] = lc['flux']
        # add scatter if desired
        np.random.seed(seed)
        lc['deviation'] = np.random.normal(size=len(lc['flux']))
        if scattered:
            lc['flux'] = lc['flux'] + lc['deviation'] * lc['fluxerr']
        return Table(lc.to_records())
    @property
    def lightcurve(self, lowrange=-30., highrange=50. ):
        """Simulated light curve as a DataFrame of per-visit fluxes.

        NOTE(review): the lowrange/highrange parameters are vestigial --
        property access cannot pass arguments, so the defaults are always
        used (and self.lowrange/self.highrange are ignored).
        """
        sn = self.SN
        # Observation window around peak, clipped to the model's range.
        timelow = sn.get('t0') + lowrange
        timehigh = sn.get('t0') + highrange
        # Model range
        modellow = sn.mintime()
        modelhigh = sn.maxtime()
        if modellow > timelow:
            timelow = modellow
        if modelhigh < timehigh:
            timehigh = modelhigh
        if self.fieldID is None:
            dataframe = self.summary
        else:
            dataframe = self.simlib(fieldID=self.fieldID)
        x = dataframe.query('expMJD > @timelow and expMJD < @timehigh')
        df = x.copy(deep=True)
        colnames = ['time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys', 'SNR',
                    'finSeeing', 'airmass', 'filtSkyBrightness', 'fiveSigmaDepth',
                    'propID', 'night', 'DetectionEfficiency']
        df['band'] = df['filter'].apply(lambda x: x.lower())
        df['flux'] = df.apply(lambda row: sn.catsimBandFlux(row['expMJD'],
                              self.lsst_bp[row['band']]), axis=1)
        df['fluxerr'] = df.apply(lambda row: sn.catsimBandFluxError(row['expMJD'],
                                 self.lsst_bp[row['band']],
                                 m5=row['fiveSigmaDepth']), axis=1)
        df['zp'] = 0.
        df['zpsys'] = 'ab'
        df.rename(columns={'expMJD': 'time'}, inplace=True)
        # Drop non-physical rows before computing signal-to-noise.
        os = len(df)
        df = df.query('flux > 0. and fluxerr > 0.')
        s = len(df)
        df['SNR'] = df['flux'] / df['fluxerr']
        return df
| rbiswas4/Cadence | gedankenLSST/sninLSST.py | Python | mit | 4,355 |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import shlex
import subprocess
import sys
import os
from utils import logger
try:
if hasattr(sys, "_run_from_cmdl") is True:
raise ImportError
from pycompss.api.parameter import FILE_IN, FILE_OUT, IN, OUT
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
except ImportError:
logger.warn("[Warning] Cannot import \"pycompss\" API packages.")
logger.warn(" Using mock decorators.")
from utils.dummy_pycompss import FILE_IN, FILE_OUT, IN, OUT # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import task # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import compss_wait_on # pylint: disable=ungrouped-imports
from basic_modules.metadata import Metadata
from basic_modules.tool import Tool
# ------------------------------------------------------------------------------
class trimgalore(Tool): # pylint: disable=invalid-name
"""
Tool for trimming FASTQ reads that are of low quality
"""
def __init__(self, configuration=None):
"""
Init function
"""
logger.info("TrimGalore FASTQ read trimming")
Tool.__init__(self)
if configuration is None:
configuration = {}
self.configuration.update(configuration)
    @task(returns=str, version=OUT, isModifier=False)
    def trimgalore_version(self): # pylint: disable=no-self-use
        """
        Get the version string reported by the trim_galore executable.

        Runs ``trim_galore --version`` and captures its output.

        Returns
        -------
        str
            Output of ``trim_galore --version``, or an empty string when
            the executable could not be run.
        """
        command_line = "trim_galore --version"
        logger.info("TRIM GALORE: command_line: " + command_line)
        try:
            args = shlex.split(command_line)
            process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # NOTE(review): communicate() returns bytes on Python 3; the
            # string concatenation below assumes Python 2 str -- confirm
            # the target interpreter.
            version, proc_err = process.communicate()
            logger.info("TRIM GALORE stdout" + version)
            logger.info("TRIM GALORE stderr" + proc_err)
        except (OSError, IOError) as msg:
            logger.fatal("I/O error({0}) - trim_galore: {1}\n{2}".format(
                msg.errno, msg.strerror, command_line))
            return ""
        return version
    @task(returns=bool,
          fastq_file_in=FILE_IN, fastq_file_out=FILE_OUT, fastq_report=FILE_OUT,
          params=IN, isModifier=False)
    def trimgalore_single(self, fastq_file_in, fastq_file_out, fastq_report, params): # pylint: disable=no-self-use,too-many-locals
        """
        Trims and removes low quality subsections and reads from a
        single-ended FASTQ file

        Parameters
        ----------
        fastq_file_in : str
            Location of the input fastq file
        fastq_file_out : str
            Location of the output (trimmed) fastq file
        fastq_report : str
            Location where the trimming report should be saved
        params : dict
            Parameters to use in TrimGalore

        Returns
        -------
        bool
            Indicator of the success of the function
        """
        # Reject missing or effectively empty inputs (4096 bytes is used
        # as the minimum plausible file size).
        if os.path.isfile(fastq_file_in) is False or os.path.getsize(fastq_file_in) <= 4096:
            logger.fatal("FILE NOT FOUND: " + fastq_file_in)
            return False
        # Output file name used by TrimGalore: predict the name that
        # trim_galore will give its trimmed output inside the tmp folder.
        fastq_trimmed = os.path.split(fastq_file_in)
        fastq_trimmed = os.path.split(os.path.join(fastq_trimmed[0], "tmp", fastq_trimmed[1]))
        tail_substring = "fastq"
        if ".fq" in fastq_trimmed[1]:
            tail_substring = "fq"
        gzipped = False
        if fastq_trimmed[1][-3:] == ".gz":
            gzipped = True
        if gzipped:
            tg_tmp_out = os.path.join(fastq_trimmed[0], fastq_trimmed[1])
            tg_tmp_out = tg_tmp_out.replace(
                "." + tail_substring + ".gz",
                "_trimmed.fq.gz"
            )
        else:
            tg_tmp_out = os.path.join(fastq_trimmed[0], fastq_trimmed[1])
            tg_tmp_out = tg_tmp_out.replace(
                "." + tail_substring,
                "_trimmed.fq"
            )
        try:
            logger.info("CREATE TMP FOLDER: " + fastq_trimmed[0])
            os.mkdir(fastq_trimmed[0])
        except (OSError, IOError) as msg:
            logger.warn("I/O error({0}) - tmp folder already exists: {1}".format(
                msg.errno, msg.strerror))
        command_line = "trim_galore " + " ".join(params) + " "
        command_line += "-o " + fastq_trimmed[0] + " "
        command_line += fastq_file_in
        logger.info("TRIM GALORE: command_line: " + command_line)
        try:
            args = shlex.split(command_line)
            process = subprocess.Popen(args)
            process.wait()
        except (OSError, IOError) as msg:
            logger.fatal("I/O error({0}) - trim_galore: {1}\n{2}".format(
                msg.errno, msg.strerror, command_line))
            return False
        # Copy the trimmed fastq to the location the pipeline expects.
        try:
            with open(fastq_file_out, "wb") as f_out:
                with open(tg_tmp_out, "rb") as f_in:
                    f_out.write(f_in.read())
        except (OSError, IOError) as error:
            logger.fatal(
                "I/O error({0}) - TRIMMED FASTQ: {1}\nREADING: {2}\nWRITING: {3}".format(
                    error.errno, error.strerror, tg_tmp_out, fastq_file_out
                )
            )
            return False
        # Copy the trimming report alongside.
        try:
            trimmed_report = os.path.join(
                fastq_trimmed[0], fastq_trimmed[1] + "_trimming_report.txt"
            )
            with open(fastq_report, "wb") as f_out:
                with open(trimmed_report, "rb") as f_in:
                    f_out.write(f_in.read())
        except (OSError, IOError) as error:
            logger.fatal(
                "I/O error({0}) - TRIMMING REPORT FASTQ 1: {1}\nWRITE: {2}\nREAD: {3}".format(
                    error.errno, error.strerror, fastq_report,
                    trimmed_report
                ))
            return False
        return True
@task(returns=bool,
fastq1_file_in=FILE_IN, fastq1_file_out=FILE_OUT, fastq1_report=FILE_OUT,
fastq2_file_in=FILE_IN, fastq2_file_out=FILE_OUT, fastq2_report=FILE_OUT,
params=IN, isModifier=False)
def trimgalore_paired( # pylint: disable=no-self-use,too-many-arguments,too-many-locals,too-many-statements,too-many-branches
self,
fastq1_file_in, fastq1_file_out, fastq1_report,
fastq2_file_in, fastq2_file_out, fastq2_report, params): # pylint: disable=no-self-use
"""
Trims and removes low quality subsections and reads from paired-end
FASTQ files
Parameters
----------
fastq_file_in : str
Location of the input fastq file
fastq_file_out : str
Location of the output fastq file
params : dict
Parameters to use in TrimGalore
Returns
-------
bool
Indicator of the success of the function
"""
input_files_not_found = False
if os.path.isfile(fastq1_file_in) is False or os.path.getsize(fastq1_file_in) <= 4096:
logger.fatal("FILE NOT FOUND: " + fastq1_file_in)
input_files_not_found = True
elif os.path.isfile(fastq2_file_in) is False or os.path.getsize(fastq1_file_in) <= 4096:
logger.fatal("FILE NOT FOUND: " + fastq2_file_in)
input_files_not_found = True
if input_files_not_found:
return False
# Output file name used by TrimGalore
fastq1_trimmed = os.path.split(fastq1_file_in)
fastq1_trimmed = os.path.split(os.path.join(fastq1_trimmed[0], "tmp", fastq1_trimmed[1]))
fastq2_trimmed = os.path.split(fastq2_file_in)
fastq2_trimmed = os.path.split(os.path.join(fastq2_trimmed[0], "tmp", fastq2_trimmed[1]))
tail_substring = "fastq"
if ".fq" in fastq1_trimmed[1]:
tail_substring = "fq"
gzipped = False
if fastq1_trimmed[1][-3:] == ".gz":
gzipped = True
if gzipped:
tg_tmp_out_1 = os.path.join(fastq1_trimmed[0], fastq1_trimmed[1])
tg_tmp_out_1 = tg_tmp_out_1.replace(
"." + tail_substring + ".gz",
"_val_1.fq.gz"
)
tg_tmp_out_2 = os.path.join(fastq2_trimmed[0], fastq2_trimmed[1])
tg_tmp_out_2 = tg_tmp_out_2.replace(
"." + tail_substring + ".gz",
"_val_2.fq.gz"
)
else:
tg_tmp_out_1 = os.path.join(fastq1_trimmed[0], fastq1_trimmed[1])
tg_tmp_out_1 = tg_tmp_out_1.replace(
"." + tail_substring,
"_val.fq"
)
tg_tmp_out_2 = os.path.join(fastq2_trimmed[0], fastq2_trimmed[1])
tg_tmp_out_2 = tg_tmp_out_2.replace(
"." + tail_substring,
"_val.fq"
)
try:
os.mkdir(fastq1_trimmed[0])
except (OSError, IOError) as msg:
logger.warn("I/O error({0}) - tmp folder already exists: {1}".format(
msg.errno, msg.strerror))
command_line = "trim_galore " + " ".join(params) + " "
command_line += "-o " + fastq1_trimmed[0] + " "
command_line += fastq1_file_in + " " + fastq2_file_in
logger.info("TRIM GALORE: command_line: " + command_line)
saving_error = False
try:
args = shlex.split(command_line)
process = subprocess.Popen(args)
process.wait()
except (OSError, IOError) as msg:
logger.fatal("I/O error({0}) - trim_galore: {1}\n{2}".format(
msg.errno, msg.strerror, command_line))
saving_error = True
try:
args = shlex.split(command_line)
process = subprocess.Popen(args)
process.wait()
except (OSError, IOError) as msg:
logger.fatal("I/O error({0}) - trim_galore: {1}\n{2}".format(
msg.errno, msg.strerror, command_line))
saving_error = True
try:
with open(fastq1_file_out, "wb") as f_out:
with open(tg_tmp_out_1, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("I/O error({0}) - Missing output file: {1}\n\tFile: {2}".format(
error.errno, error.strerror, tg_tmp_out_1))
saving_error = True
try:
with open(fastq2_file_out, "wb") as f_out:
with open(tg_tmp_out_2, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("I/O error({0}) - Missing output file: {1}\n\tFile: {2}".format(
error.errno, error.strerror, tg_tmp_out_2))
saving_error = True
try:
tg_tmp_out_rpt_1 = os.path.join(
fastq1_trimmed[0], fastq1_trimmed[1] + "_trimming_report.txt"
)
with open(fastq1_report, "wb") as f_out:
with open(tg_tmp_out_rpt_1, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("I/O error({0}) - Missing output file: {1}\n\tFile in: {2}".format(
error.errno, error.strerror, tg_tmp_out_rpt_1))
saving_error = True
try:
tg_tmp_out_rpt_2 = os.path.join(
fastq2_trimmed[0], fastq2_trimmed[1] + "_trimming_report.txt"
)
with open(fastq2_report, "wb") as f_out:
with open(tg_tmp_out_rpt_2, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("I/O error({0}) - Missing output file: {1}\n\tFile: {2}".format(
error.errno, error.strerror, tg_tmp_out_rpt_2))
saving_error = True
if saving_error:
return False
return True
@staticmethod
def get_trimgalore_params(params):
"""
Function to handle for extraction of commandline parameters
Parameters
----------
params : dict
Returns
-------
list
"""
command_params = []
command_parameters = {
# General options
"tg_quality": ["--quality", True],
"tg_fastqc": ["--fastqc", False],
"tg_fastqc_args": ["--fastqc_args", True],
"tg_adapter": ["--adapter", True],
"tg_adapter2": ["--adapter2", True],
"tg_illumina": ["--illumina", False],
"tg_nextera": ["--nextera", False],
"tg_small_rna": ["--small_rna", False],
"tg_max_length": ["--max_length", True],
"tg_stringency": ["--stringency", True],
"tg_error_rate": ["-e", True],
"tg_length": ["--length", True],
"tg_max_n": ["--max_n", True],
"tg_trim_n": ["--trim-n", False],
# "tg_output_dir": ["--output_dir", True],
"tg_no_report_file": ["--no_report_file", False],
"tg_clip_R1": ["--clip_R1", True],
"tg_clip_R2": ["--clip_R2", True],
"tg_three_prime_clip_R1": ["--three_prime_clip_R1", True],
"tg_three_prime_clip_R2": ["--three_prime_clip_R2", True],
# RRBS specific options
"tg_rrbs": ["--rrbs", False],
"tg_non_directional": ["--non_directional", False],
"tg_keep": ["--keep", False],
# Paired-end specific options
"tg_paired": ["--paired", False],
"tg_trim1": ["--trim1", False],
"tg_retain_unpaired": ["--retain_unpaired", False],
"tg_length_1": ["--length_1", True],
"tg_length_2": ["--length_2", True],
}
for param in params:
if param in command_parameters:
if command_parameters[param][1] and params[param] != "":
command_params = command_params + [command_parameters[param][0], params[param]]
else:
if command_parameters[param][0] and params[param] is not False:
command_params.append(command_parameters[param][0])
if ("tg_phred33" in params and "tg_phred64" not in params and
params["tg_phred33"] is not False):
command_params.append(command_parameters["tg_phred33"][0])
if ("tg_phred64" in params and "tg_phred33" not in params and
params["tg_phred64"] is not False):
command_params.append(command_parameters["tg_phred64"][0])
return command_params
def run(self, input_files, input_metadata, output_files):
"""
The main function to run TrimGalore to remove low quality and very short
reads. TrimGalore uses CutAdapt and FASTQC for the analysis.
Parameters
----------
input_files : dict
fastq1 : string
Location of the FASTQ file
fastq2 : string
[OPTIONAL] Location of the paired end FASTQ file
metadata : dict
Matching metadata for the inpit FASTQ files
Returns
-------
output_files : dict
fastq1_trimmed : str
Location of the trimmed FASTQ file
fastq2_trimmed : str
[OPTIONAL] Location of a trimmed paired end FASTQ file
output_metadata : dict
Matching metadata for the output files
"""
if "tg_version" in self.configuration:
version = self.trimgalore_version()
version = compss_wait_on(version)
logger.info("TRIM GALORE VERSION: " + version)
if "fastq2" in input_files:
if "tg_paired" not in self.configuration:
self.configuration["tg_paired"] = True
command_params = self.get_trimgalore_params(self.configuration)
if "fastq2" in input_files:
logger.info("PAIRED END")
results = self.trimgalore_paired(
input_files["fastq1"],
output_files["fastq1_trimmed"],
output_files["fastq1_report"],
input_files["fastq2"],
output_files["fastq2_trimmed"],
output_files["fastq2_report"],
command_params
)
else:
logger.info("SINGLE END")
results = self.trimgalore_single(
input_files['fastq1'],
output_files["fastq1_trimmed"],
output_files["fastq1_report"],
command_params
)
results = compss_wait_on(results)
if results is False:
logger.fatal("Error in Trim Galore py: TrimGalore: run failed with error: {}", results)
return ({}, {})
output_files_created = {
"fastq1_trimmed": output_files["fastq1_trimmed"],
"fastq1_report": output_files["fastq1_report"]
}
output_metadata = {
"fastq1_trimmed": Metadata(
data_type=input_metadata["fastq1"].data_type,
file_type="FASTQ",
file_path=output_files["fastq1_trimmed"],
sources=[input_metadata["fastq1"].file_path],
taxon_id=input_metadata["fastq1"].taxon_id,
meta_data={
"tool": "trim_galore",
"parameters": command_params
}
),
"fastq1_report": Metadata(
data_type=input_metadata["fastq1"].data_type,
file_type="TXT",
file_path=output_files["fastq1_report"],
sources=[output_files["fastq1_trimmed"]],
taxon_id=input_metadata["fastq1"].taxon_id,
meta_data={
"tool": "trim_galore",
"parameters": command_params
}
)
}
if "fastq2" in input_files:
output_files_created["fastq2_trimmed"] = output_files["fastq2_trimmed"]
output_files_created["fastq2_report"] = output_files["fastq2_report"]
output_metadata["fastq2_trimmed"] = Metadata(
data_type=input_metadata["fastq2"].data_type,
file_type="FASTQ",
file_path=output_files["fastq2_trimmed"],
sources=[input_metadata["fastq2"].file_path],
taxon_id=input_metadata["fastq2"].taxon_id,
meta_data={
"tool": "trim_galore",
"parameters": command_params
}
)
output_metadata["fastq2_report"] = Metadata(
data_type=input_metadata["fastq2"].data_type,
file_type="TXT",
file_path=output_files["fastq2_report"],
sources=[output_files["fastq2_trimmed"]],
taxon_id=input_metadata["fastq2"].taxon_id,
meta_data={
"tool": "trim_galore",
"parameters": command_params
}
)
logger.info("TRIM GALORE: GENERATED FILES:\n\t{0}\n\t{1}".format(
output_files["fastq1_trimmed"], output_files["fastq2_trimmed"]))
else:
logger.info("TRIM GALORE: GENERATED FILES:\n\t{0}".format(
output_files["fastq1_trimmed"]))
return output_files_created, output_metadata
# ------------------------------------------------------------------------------
| Multiscale-Genomics/mg-process-fastq | tool/trimgalore.py | Python | apache-2.0 | 20,679 |
from io import BytesIO
import random
#
import pytest
from pyhdb.protocol import types
# ########################## Test value unpacking #####################################
@pytest.mark.parametrize("given,expected", [
(b"\xFF", None),
(b"\x2d\x50\x4f\x49\x4e\x54\x20\x28\x31\x2e\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x32\x2e\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29",
"POINT (1.0000000000000000 2.0000000000000000)"),
(b"\x59\x4c\x49\x4e\x45\x53\x54\x52\x49\x4e\x47\x20\x28\x31\x2e\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x32\x2e" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x2c" + \
b"\x20\x32\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x29",
"LINESTRING (1.0000000000000000 2.0000000000000000, " + \
"2.0000000000000000 1.0000000000000000)"),
(b"\xa7\x50\x4f\x4c\x59\x47\x4f\x4e\x20\x28\x28\x31\x2e\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20\x30" + \
b"\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x20\x30\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x2c\x20\x2d\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20\x31\x2e\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29\x29",
"POLYGON ((1.0000000000000000 1.0000000000000000, " + \
"0.0000000000000000 0.0000000000000000, " + \
"-1.0000000000000000 1.0000000000000000, " + \
"1.0000000000000000 1.0000000000000000))"),
(b"\x32\x4d\x55\x4c\x54\x49\x50\x4f\x49\x4e\x54\x20\x28\x31\x2e\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x32\x2e" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29",
"MULTIPOINT (1.0000000000000000 2.0000000000000000)"),
(b"\x60\x4d\x55\x4c\x54\x49\x4c\x49\x4e\x45\x53\x54\x52\x49\x4e\x47\x20" + \
b"\x28\x28\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x20\x32\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x2c\x20\x32\x2e\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29\x29",
"MULTILINESTRING ((1.0000000000000000 2.0000000000000000, " + \
"2.0000000000000000 1.0000000000000000))"),
(b"\xae\x4d\x55\x4c\x54\x49\x50\x4f\x4c\x59\x47\x4f\x4e\x20\x28\x28\x28" + \
b"\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x2c\x20\x30\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x20\x30\x2e\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20\x2d\x31\x2e\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20\x31" + \
b"\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x29\x29\x29",
"MULTIPOLYGON (((1.0000000000000000 1.0000000000000000, " + \
"0.0000000000000000 0.0000000000000000, " + \
"-1.0000000000000000 1.0000000000000000, " + \
"1.0000000000000000 1.0000000000000000)))"),
])
def test_unpack_geometry_wkt(given, expected):
    """Geometry values read from a result set stream decode to WKT strings."""
    payload = BytesIO(given)
    decoded = types.Geometry.from_resultset(payload)
    assert decoded == expected
# ########################## Test value packing #####################################
@pytest.mark.parametrize("given,expected", [
(None, b"\x1d\xFF", ),
("POINT (1.0000000000000000 2.0000000000000000)",
b"\x1d\x2d\x50\x4f\x49\x4e\x54\x20\x28\x31\x2e\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x32\x2e\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29"),
("LINESTRING (1.0000000000000000 2.0000000000000000, " + \
"2.0000000000000000 1.0000000000000000)",
b"\x1d\x59\x4c\x49\x4e\x45\x53\x54\x52\x49\x4e\x47\x20\x28\x31\x2e\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x32" + \
b"\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x2c\x20\x32\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x29"),
("POLYGON ((1.0000000000000000 1.0000000000000000, " + \
"0.0000000000000000 0.0000000000000000, " + \
"-1.0000000000000000 1.0000000000000000, " + \
"1.0000000000000000 1.0000000000000000))",
b"\x1d\xa7\x50\x4f\x4c\x59\x47\x4f\x4e\x20\x28\x28\x31\x2e\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20" + \
b"\x30\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x20\x30\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x2c\x20\x2d\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20\x31\x2e\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29\x29"),
("MULTIPOINT (1.0000000000000000 2.0000000000000000)",
b"\x1d\x32\x4d\x55\x4c\x54\x49\x50\x4f\x49\x4e\x54\x20\x28\x31\x2e\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x32" + \
b"\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29"),
("MULTILINESTRING ((1.0000000000000000 2.0000000000000000, " + \
"2.0000000000000000 1.0000000000000000))",
b"\x1d\x60\x4d\x55\x4c\x54\x49\x4c\x49\x4e\x45\x53\x54\x52\x49\x4e\x47" + \
b"\x20\x28\x28\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x20\x32\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x2c\x20\x32\x2e\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x29\x29"),
("MULTIPOLYGON (((1.0000000000000000 1.0000000000000000, " + \
"0.0000000000000000 0.0000000000000000, " + \
"-1.0000000000000000 1.0000000000000000, " + \
"1.0000000000000000 1.0000000000000000)))",
b"\x1d\xae\x4d\x55\x4c\x54\x49\x50\x4f\x4c\x59\x47\x4f\x4e\x20\x28\x28" + \
b"\x28\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x2c\x20\x30\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x20\x30\x2e\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20\x2d\x31\x2e\x30\x30\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x20\x31\x2e\x30" + \
b"\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x2c\x20" + \
b"\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x20\x31\x2e\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30" + \
b"\x30\x30\x30\x29\x29\x29"),
])
def test_pack_geometry_wkt(given, expected):
    """A WKT string (or None) packs to the expected type-prefixed bytes."""
    packed = types.Geometry.prepare(given)
    assert packed == expected
# #############################################################################################################
# Real HANA interaction with geormetry (integration tests)
# #############################################################################################################
import tests.helper
# Base name shared by the tables created for these integration tests.
TABLE = 'PYHDB_TEST_GEOMETRY'
TABLE_POINT = TABLE + "_POINT"
TABLE_GEOMETRY = TABLE + "_GEOMETRY"
# Column definitions exercising HANA's spatial column types.
TABLE_FIELDS_POINT = "point ST_POINT NOT NULL"
TABLE_FIELDS_GEOMETRY = "geo ST_GEOMETRY NOT NULL"
@pytest.fixture
def test_table_point(request, connection):
    """Create the ST_POINT test table; teardown is registered on *request*."""
    tests.helper.create_table_fixture(request, connection, TABLE_POINT,
                                      TABLE_FIELDS_POINT, column_table=True)
@pytest.fixture
def test_table_geometry(request, connection):
    """Create the ST_GEOMETRY test table; teardown is registered on *request*."""
    tests.helper.create_table_fixture(request, connection, TABLE_GEOMETRY,
                                      TABLE_FIELDS_GEOMETRY, column_table=True)
@pytest.mark.hanatest
def test_insert_point(connection, test_table_point):
    """Insert spatial point into table and read its coordinates back."""
    cursor = connection.cursor()
    # random.randint() requires integer bounds; float bounds were deprecated
    # in Python 3.10 and raise an error from 3.12 onwards.
    point_x = random.randint(-100, 100)
    point_y = random.randint(-100, 100)
    wkt_string = "POINT(%f %f)" % (point_x, point_y)
    cursor.execute("insert into %s (point) values (:1)" % TABLE_POINT, [wkt_string])
    connection.commit()
    cursor = connection.cursor()
    row = cursor.execute('select point.ST_X(), point.ST_Y() from %s' % TABLE_POINT).fetchone()
    assert row[0] == point_x
    assert row[1] == point_y
@pytest.mark.hanatest
def test_insert_linestring(connection, test_table_geometry):
    """Insert spatial linestring into table and verify its end points."""
    cursor = connection.cursor()
    # random.randint() requires integer bounds; float bounds were deprecated
    # in Python 3.10 and raise an error from 3.12 onwards.
    point1_x = random.randint(-100, 100)
    point1_y = random.randint(-100, 100)
    point2_x = random.randint(-100, 100)
    point2_y = random.randint(-100, 100)
    wkt_string = "LINESTRING(%f %f, %f %f)" % (point1_x, point1_y, point2_x, point2_y)
    cursor.execute("insert into %s (geo) values (:1)" % TABLE_GEOMETRY, [wkt_string])
    connection.commit()
    cursor = connection.cursor()
    sql = """
        Select geo.ST_StartPoint().ST_X(), geo.ST_StartPoint().ST_Y(),
               geo.ST_EndPoint().ST_X(), geo.ST_EndPoint().ST_Y()
        From %s
    """
    row = cursor.execute(sql % TABLE_GEOMETRY).fetchone()
    assert row[0] == point1_x
    assert row[1] == point1_y
    assert row[2] == point2_x
    assert row[3] == point2_y
@pytest.mark.hanatest
def test_insert_polygon(connection, test_table_geometry):
    """Insert spatial polygon into table and verify its bounding box."""
    cursor = connection.cursor()
    # The edges of a polygon can not cross. Therefore we build an arbitrary
    # quadrangle with one corner per quadrant.
    # random.randint() requires integer bounds; float bounds were deprecated
    # in Python 3.10 and raise an error from 3.12 onwards.
    point1_x = random.randint(0, 100)
    point1_y = random.randint(0, 100)
    point2_x = random.randint(0, 100)
    point2_y = random.randint(-100, 0)
    point3_x = random.randint(-100, 0)
    point3_y = random.randint(-100, 0)
    point4_x = random.randint(-100, 0)
    point4_y = random.randint(0, 100)
    wkt_string = "POLYGON((%f %f, %f %f, %f %f, %f %f, %f %f))" % (
        point1_x, point1_y, point2_x, point2_y, point3_x, point3_y,
        point4_x, point4_y, point1_x, point1_y
    )
    cursor.execute("insert into %s (geo) values (:1)" % TABLE_GEOMETRY, [wkt_string])
    connection.commit()
    cursor = connection.cursor()
    # We don't want to check all points of the polygon.
    # We will only check the minimal and maximal values.
    sql = """
        Select geo.ST_XMin(), geo.ST_XMax(), geo.ST_YMin(), geo.ST_YMax()
        From %s
    """
    row = cursor.execute(sql % TABLE_GEOMETRY).fetchone()
    assert row[0] == min(point1_x, point2_x, point3_x, point4_x)
    assert row[1] == max(point1_x, point2_x, point3_x, point4_x)
    assert row[2] == min(point1_y, point2_y, point3_y, point4_y)
    assert row[3] == max(point1_y, point2_y, point3_y, point4_y)
| SAP/PyHDB | tests/types/test_geometry.py | Python | apache-2.0 | 12,166 |
import mimetypes
import os.path
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.core.mail import EmailMessage
from django.template import Context as TemplateContext
from django.utils import translation
import mock
from olympia.amo.models import FakeEmail
from olympia.amo.tests import BaseTestCase
from olympia.amo.utils import send_mail, send_html_mail_jinja
from olympia.devhub.tests.test_models import ATTACHMENTS_DIR
from olympia.users.models import UserProfile, UserNotification
from olympia.users import notifications
class TestSendMail(BaseTestCase):
    """Tests for amo's send_mail wrapper: blacklisting, per-user notification
    settings, fake/real delivery switches and async retry behaviour.

    NOTE(review): this module uses Python-2-only constructs (`unicode`, and
    `async=` keyword arguments -- `async` is a reserved word since Python
    3.7), so it only parses on Python 2 / <=3.6.
    """
    fixtures = ['base/users']
    def setUp(self):
        super(TestSendMail, self).setUp()
        # Save the blacklist so tests can mutate it and tearDown can restore.
        self._email_blacklist = list(getattr(settings, 'EMAIL_BLACKLIST', []))
    def tearDown(self):
        translation.activate('en_US')
        settings.EMAIL_BLACKLIST = self._email_blacklist
        super(TestSendMail, self).tearDown()
    def test_send_string(self):
        # recipient_list must be a sequence of addresses, not a bare string.
        to = 'f@f.com'
        with self.assertRaises(ValueError):
            send_mail('subj', 'body', recipient_list=to)
    def test_blacklist(self):
        # Blacklisted recipients are silently dropped but the call succeeds.
        to = 'nobody@mozilla.org'
        settings.EMAIL_BLACKLIST = (to,)
        success = send_mail('test subject', 'test body',
                            recipient_list=[to], fail_silently=False)
        assert success
        assert len(mail.outbox) == 0
    def test_blacklist_flag(self):
        # use_blacklist=False bypasses the blacklist check entirely.
        to = 'nobody@mozilla.org'
        settings.EMAIL_BLACKLIST = (to,)
        success = send_mail('test subject', 'test body',
                            recipient_list=[to], fail_silently=False,
                            use_blacklist=True)
        assert success
        assert len(mail.outbox) == 0
        success = send_mail('test subject', 'test body',
                            recipient_list=[to], fail_silently=False,
                            use_blacklist=False)
        assert success
        assert len(mail.outbox) == 1
    def test_user_setting_default(self):
        user = UserProfile.objects.all()[0]
        to = user.email
        # Confirm there's nothing in the DB and we're using the default
        assert UserNotification.objects.count() == 0
        # Make sure that this is True by default
        setting = notifications.NOTIFICATIONS_BY_SHORT['reply']
        assert setting.default_checked
        success = send_mail('test subject', 'test body', perm_setting='reply',
                            recipient_list=[to], fail_silently=False)
        assert success, "Email wasn't sent"
        assert len(mail.outbox) == 1
        # bug 676601
        assert mail.outbox[0].body.count('users/unsubscribe') == 1
    def test_user_setting_checked(self):
        user = UserProfile.objects.all()[0]
        to = user.email
        n = notifications.NOTIFICATIONS_BY_SHORT['reply']
        UserNotification.objects.get_or_create(
            notification_id=n.id, user=user, enabled=True)
        # Confirm we're reading from the database
        assert UserNotification.objects.filter(
            notification_id=n.id).count() == 1
        success = send_mail('test subject', 'test body', perm_setting='reply',
                            recipient_list=[to], fail_silently=False)
        assert "You received this email because" in mail.outbox[0].body
        assert success, "Email wasn't sent"
        assert len(mail.outbox) == 1
    def test_user_mandatory(self):
        # Make sure there's no unsubscribe link in mandatory emails.
        user = UserProfile.objects.all()[0]
        to = user.email
        n = notifications.NOTIFICATIONS_BY_SHORT['individual_contact']
        UserNotification.objects.get_or_create(
            notification_id=n.id, user=user, enabled=True)
        assert n.mandatory, "Notification isn't mandatory"
        success = send_mail('test subject', 'test body', perm_setting=n,
                            recipient_list=[to], fail_silently=False)
        assert success, "Email wasn't sent"
        body = mail.outbox[0].body
        assert "Unsubscribe:" not in body
        assert "You can't unsubscribe from" in body
    def test_user_setting_unchecked(self):
        # A user who disabled the notification must not receive the mail.
        user = UserProfile.objects.all()[0]
        to = user.email
        n = notifications.NOTIFICATIONS_BY_SHORT['reply']
        UserNotification.objects.get_or_create(
            notification_id=n.id, user=user, enabled=False)
        # Confirm we're reading from the database.
        assert UserNotification.objects.filter(
            notification_id=n.id).count() == 1
        success = send_mail('test subject', 'test body', perm_setting='reply',
                            recipient_list=[to], fail_silently=False)
        assert success, "Email wasn't sent"
        assert len(mail.outbox) == 0
    @mock.patch.object(settings, 'EMAIL_BLACKLIST', ())
    def test_success_real_mail(self):
        assert send_mail('test subject', 'test body',
                         recipient_list=['nobody@mozilla.org'],
                         fail_silently=False)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].subject.find('test subject') == 0
        assert mail.outbox[0].body.find('test body') == 0
    @mock.patch.object(settings, 'EMAIL_BLACKLIST', ())
    @mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
    def test_success_fake_mail(self):
        # With SEND_REAL_EMAIL off the message lands in FakeEmail instead.
        assert send_mail('test subject', 'test body',
                         recipient_list=['nobody@mozilla.org'],
                         fail_silently=False)
        assert len(mail.outbox) == 0
        assert FakeEmail.objects.count() == 1
        assert FakeEmail.objects.get().message.endswith('test body')
    @mock.patch.object(settings, 'EMAIL_BLACKLIST', ())
    @mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
    @mock.patch.object(settings, 'EMAIL_QA_WHITELIST', ('nobody@mozilla.org',))
    def test_qa_whitelist(self):
        # Whitelisted QA addresses get the real mail AND the FakeEmail copy.
        assert send_mail('test subject', 'test body',
                         recipient_list=['nobody@mozilla.org'],
                         fail_silently=False)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].subject.find('test subject') == 0
        assert mail.outbox[0].body.find('test body') == 0
        assert FakeEmail.objects.count() == 1
        assert FakeEmail.objects.get().message.endswith('test body')
    @mock.patch.object(settings, 'EMAIL_BLACKLIST', ())
    @mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
    @mock.patch.object(settings, 'EMAIL_QA_WHITELIST', ('nobody@mozilla.org',))
    def test_qa_whitelist_with_mixed_emails(self):
        # Only the whitelisted recipient receives the real message.
        assert send_mail('test subject', 'test body',
                         recipient_list=['nobody@mozilla.org', 'b@example.fr'],
                         fail_silently=False)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].to == ['nobody@mozilla.org']
        assert FakeEmail.objects.count() == 1
    @mock.patch('olympia.amo.utils.Context')
    def test_dont_localize(self, fake_Context):
        # The perm_setting label must stay untranslated regardless of the
        # active locale. NOTE(review): `unicode` is a Python 2 builtin.
        perm_setting = []
        def ctx(d, autoescape):
            perm_setting.append(unicode(d['perm_setting']))
            return TemplateContext(d, autoescape=autoescape)
        fake_Context.side_effect = ctx
        user = UserProfile.objects.all()[0]
        to = user.email
        translation.activate('zh_TW')
        send_mail('test subject', 'test body', perm_setting='reply',
                  recipient_list=[to], fail_silently=False)
        assert perm_setting[0] == u'an add-on developer replies to my review'
    def test_send_html_mail_jinja(self):
        # Multipart mail: text/plain part without HTML, text/html part with.
        emails = ['omg@org.yes']
        subject = u'Mozilla Add-ons: Thank you for your submission!'
        html_template = 'devhub/email/submission.html'
        text_template = 'devhub/email/submission.txt'
        send_html_mail_jinja(subject, html_template, text_template,
                             context={}, recipient_list=emails,
                             from_email=settings.NOBODY_EMAIL,
                             use_blacklist=False,
                             perm_setting='individual_contact',
                             headers={'Reply-To': settings.EDITORS_EMAIL})
        msg = mail.outbox[0]
        message = msg.message()
        assert msg.to == emails
        assert msg.subject == subject
        assert msg.from_email == settings.NOBODY_EMAIL
        assert msg.extra_headers['Reply-To'] == settings.EDITORS_EMAIL
        assert message.is_multipart()
        assert message.get_content_type() == 'multipart/alternative'
        assert message.get_default_type() == 'text/plain'
        payload = message.get_payload()
        assert payload[0].get_content_type() == 'text/plain'
        assert payload[1].get_content_type() == 'text/html'
        message1 = payload[0].as_string()
        message2 = payload[1].as_string()
        assert '<a href' not in message1, 'text-only email contained HTML!'
        assert '<a href' in message2, 'HTML email did not contain HTML!'
        unsubscribe_msg = unicode(notifications.individual_contact.label)
        assert unsubscribe_msg in message1
        assert unsubscribe_msg in message2
    def test_send_attachment(self):
        path = os.path.join(ATTACHMENTS_DIR, 'bacon.txt')
        attachments = [(os.path.basename(path), storage.open(path).read(),
                        mimetypes.guess_type(path)[0])]
        send_mail('test subject', 'test body', from_email='a@example.com',
                  recipient_list=['b@example.com'], attachments=attachments)
        assert attachments == mail.outbox[0].attachments, (
            'Attachments not included')
    def test_send_multilines_subjects(self):
        # Newlines in subjects are collapsed (header injection guard).
        send_mail('test\nsubject', 'test body', from_email='a@example.com',
                  recipient_list=['b@example.com'])
        assert 'test subject' == mail.outbox[0].subject, 'Subject not stripped'
    def test_autoresponse_headers(self):
        send_mail('subject', 'test body', from_email='a@example.com',
                  recipient_list=['b@example.com'])
        headers = mail.outbox[0].extra_headers
        assert headers['X-Auto-Response-Suppress'] == 'RN, NRN, OOF, AutoReply'
        assert headers['Auto-Submitted'] == 'auto-generated'
    def make_backend_class(self, error_order):
        # Builds an EmailMessage factory that fails (raises on send) for each
        # True in error_order, succeeding once a False is reached.
        throw_error = iter(error_order)
        def make_backend(*args, **kwargs):
            if next(throw_error):
                class BrokenMessage(object):
                    def __init__(*args, **kwargs):
                        pass
                    def send(*args, **kwargs):
                        raise RuntimeError('uh oh')
                    def attach_alternative(*args, **kwargs):
                        pass
                backend = BrokenMessage()
            else:
                backend = EmailMessage(*args, **kwargs)
            return backend
        return make_backend
    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=True` keyword arguments below only parse on Python 2 / <=3.6.
    @mock.patch('olympia.amo.tasks.EmailMessage')
    def test_async_will_retry(self, backend):
        # Two failures then success: sync raises, async retries to success.
        backend.side_effect = self.make_backend_class([True, True, False])
        with self.assertRaises(RuntimeError):
            send_mail('test subject',
                      'test body',
                      recipient_list=['somebody@mozilla.org'])
        send_mail('test subject',
                  'test body',
                  async=True,
                  recipient_list=['somebody@mozilla.org'])
    @mock.patch('olympia.amo.tasks.EmailMessage')
    def test_async_will_stop_retrying(self, backend):
        # Retries are bounded by max_retries; the error then propagates.
        backend.side_effect = self.make_backend_class([True, True])
        with self.assertRaises(RuntimeError):
            send_mail('test subject',
                      'test body',
                      async=True,
                      max_retries=1,
                      recipient_list=['somebody@mozilla.org'])
| andymckay/addons-server | src/olympia/amo/tests/test_send_mail.py | Python | bsd-3-clause | 11,889 |
# ==============================================================================
# Copyright [2013] [Kevin Carter]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import traceback
from subprocess import Popen, PIPE
from imager import novafunctions, utils, notifier, get_instance_name
class ImagingInProcess(Exception):
    """Raised when an image of this instance is already being saved."""
    pass
class GeneralImagerFailure(Exception):
    """Raised for any other failure during the imaging workflow."""
    pass
class InstanceImager(object):
    """
    Access Nova and create an image of the running instance.

    Instantiation drives the whole workflow: authenticate with Nova, read
    the instance UUID from xenstore, purge stale or duplicate images, then
    request a fresh image and wait for it to become ACTIVE.
    """
    def __init__(self, log, p_args):
        """
        :param log: application logger.
        :param p_args: dict of parsed runtime arguments; must contain
            'image_name'.
        """
        self.m_args = p_args
        self.log = log
        self._nova = novafunctions.NovaFunctionalLogic(logger=log, args=p_args)
        self.nova = self._nova.os_auth()
        self.uuid = self.get_instance_id()
        self.name = get_instance_name()
        tag = 'CloudNull_imageizer'
        self.image_name = '%s_%s_%s_%s' % (self.name,
                                           self.m_args['image_name'],
                                           self.uuid,
                                           tag)
        self.check_for_image()
        self.create_image()
    def commander(self, cmd):
        """
        Run sub-process (SHELL) commands.

        :param cmd: command string, split on whitespace before execution.
        :return: stripped stdout of the command.
        :raises GeneralImagerFailure: if the command produced any stderr.
        """
        output = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
        stdout, stderr = output.communicate()
        if stderr:
            raise GeneralImagerFailure(stderr)
        return stdout.strip()
    def get_instance_id(self):
        """
        Get the UUID of the instance that we want to make an image of.

        The domain name is read from xenstore (form "instance-<uuid>");
        the part after the first dash is returned. Any failure is logged
        and terminates the process.
        """
        try:
            xen_s = self.commander(cmd='which xenstore')
            dom_id = self.commander(cmd='%s read domid' % xen_s)
            slice_id = self.commander(cmd='%s read /local/domain/%s/name'
                                          % (xen_s, dom_id))
            slice_id = slice_id.split('-', 1)[1]
            return slice_id
        except Exception:
            _tb = traceback.format_exc()
            self.log.error(_tb)
            sys.exit('%s\n\nSub Process FAILURE!!!' % _tb)
    def check_for_image(self):
        """
        Remove a previous image carrying our name, or bail out if one is
        still being saved.

        ACTIVE images are deleted so a fresh one can be created. An image
        stuck in SAVING for more than five hours is treated as crusty: a
        notification is sent and it is deleted. Otherwise imaging is still
        in progress and this is logged without re-raising.
        """
        from datetime import datetime as dt, timedelta
        from dateutil import parser
        try:
            img = self._nova.list_image(nova=self.nova)
            for obj in img['images']:
                if (obj['name'] == self.image_name and
                        obj['status'].upper() == 'ACTIVE'):
                    self._nova.destroy_image(image_id=obj['id'],
                                             nova=self.nova)
                elif (obj['name'] == self.image_name and
                        obj['status'].upper() == 'SAVING'):
                    time = obj['updated']
                    _tm = time.replace('Z', '')
                    _time = parser.parse(_tm)
                    _now = dt.utcnow()
                    if (_now - _time) > timedelta(hours=5):
                        obj['reason'] = 'crusty_image'
                        self.notify_me(obj)
                        self._nova.destroy_image(image_id=obj['id'],
                                                 nova=self.nova)
                    else:
                        raise ImagingInProcess('Image ID "%s"' % obj['id'])
        # "except X as e" works on Python 2.6+ and Python 3, unlike the
        # original Python-2-only "except X, e" form.
        except ImagingInProcess as exp:
            self.log.info('Imaging is presently in progress. %s' % exp)
    def create_image(self):
        """
        Request a new image of this instance and wait for it to go ACTIVE.

        A random metadata key is attached to the request so the matching
        image can be found in the image list afterwards. A notification is
        sent on success, on timeout and on failure.
        """
        try:
            randkey = utils.rand_string()
            self._nova.image_create(nova=self.nova,
                                    server_id=self.uuid,
                                    img_name=self.image_name,
                                    meta_data={'cloudnull': randkey})
            img = self._nova.list_image(nova=self.nova)
            # Initialise imid so a missing match is reported cleanly instead
            # of raising NameError at the "if imid" check below.
            imid = None
            for obj in img['images']:
                if (obj['name'] == self.image_name and
                        obj['status'].upper() == 'SAVING' and
                        'cloudnull' in obj['metadata'] and
                        obj['metadata']['cloudnull'] == randkey):
                    imid = obj['id']
            if not imid:
                raise GeneralImagerFailure('Image "%s" was not found after'
                                           ' the create request'
                                           % self.image_name)
            data = self._nova.status_active(image_id=imid, nova=self.nova)
            if data:
                data['reason'] = 'image_done'
                self.notify_me(data)
            else:
                self._nova.destroy_image(image_id=imid,
                                         nova=self.nova)
                # NOTE(review): if status_active returned None rather than
                # an empty dict this assignment raises and is reported as
                # 'image_fail' below -- confirm intended.
                data['reason'] = 'never_active'
                self.notify_me(data)
                raise GeneralImagerFailure('We Failed waiting for image %s'
                                           ' to go active' % imid)
        except Exception as exp:
            # Any failure above (including the explicit raises) is reported
            # through the notifier with the traceback attached.
            data = {'reason': 'image_fail',
                    'data': exp,
                    'trace': traceback.format_exc()}
            self.notify_me(data)
    def notify_me(self, notice):
        """Send *notice* through the configured mail notifier."""
        notifier.Mailer(notice=notice,
                        args=self.m_args,
                        logger=self.log)
        del notice
| cloudnull/transporter | imager/imaging.py | Python | apache-2.0 | 5,608 |
# GPAW customize script for Niflheim EL5 opteron nodes: gcc 4.3 + OpenMPI
# with GOTO2/ACML BLAS, BLACS/ScaLAPACK, instrumented with TAU profiling.
scalapack = True
compiler = 'gcc43'
libraries = [
    'gfortran',
    'scalapack', 'mpiblacsF77init', 'mpiblacs', 'scalapack',
    'xc',
    'goto2', 'acml', 'acml_mv',
    # must not link to mpi explicitly: -export-dynamic must be used instead
    ]
library_dirs = [
    '/opt/openmpi/1.3.3-1.el5.fys.gfortran43.4.3.2/lib64',
    '/opt/goto2/2.1.13/1.el5.fys.gfortran43.4.3.2.smp/lib64',
    '/opt/acml/4.4.0/gfortran4364/lib',
    '/opt/blacs/1.1/24.el5.fys.gfortran43.4.3.2.openmpi.1.3.3/lib64',
    '/opt/scalapack/1.8.0/1.el5.fys.gfortran43.4.3.2.openmpi.1.3.3.goto2.2.1.13.acml.4.4.0/lib64',
    '/usr/lib64'
    ]
include_dirs += ['/opt/openmpi/1.3.3-1.el5.fys.gfortran43.4.3.2/include']
# Bake the library search paths into the binary via -rpath so no
# LD_LIBRARY_PATH setup is needed at run time.
extra_link_args = [
    '-export-dynamic -Wl,-rpath=/opt/openmpi/1.3.3-1.el5.fys.gfortran43.4.3.2/lib64,'
    '-rpath=/opt/goto2/2.1.13/1.el5.fys.gfortran43.4.3.2.smp/lib64,'
    '-rpath=/opt/acml/4.4.0/gfortran4364/lib,'
    '-rpath=/opt/blacs/1.1/24.el5.fys.gfortran43.4.3.2.openmpi.1.3.3/lib64,'
    '-rpath=/opt/scalapack/1.8.0/1.el5.fys.gfortran43.4.3.2.openmpi.1.3.3.goto2.2.1.13.acml.4.4.0/lib64,'
    '-rpath=/usr/lib64'
    ]
extra_compile_args = ['-O3', '-std=c99', '-funroll-all-loops', '-fPIC']
# BLACS/ScaLAPACK symbols in these builds carry no trailing underscore.
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
# Wrap compilation and linking in the TAU instrumenting compiler wrapper.
import tau
import os
tau_path = tau.__file__[0:tau.__file__.find('lib')]
tau_make = tau_path+'lib/Makefile.tau-mpi-pthread-python-pdt'
mpicompiler = "tau_cc.sh -tau_options='-optShared -optCompInst -optVerbose -optMpi' -optTau='-rn Py_RETURN_NONE -i"+os.path.join(os.environ['TAUROOT'], 'include', 'TAU_PYTHON_FIX.h')+"' -tau_makefile="+tau_make
#mpicompiler = "tau_cc.sh -tau_options='-optShared -optCompInst -optVerbose -optMpi' -optTau='-rn Py_RETURN_NONE' -tau_makefile="+tau_make
mpilinker = mpicompiler
compiler = mpicompiler
extra_link_args += ['-Wl,-rpath='+tau_path+'lib/']
platform_id = 'opteron-TAU'
| robwarm/gpaw-symm | doc/install/Linux/Niflheim/el5-opteron-gcc43-goto2-1.13-acml-4.4.0-TAU.py | Python | gpl-3.0 | 1,924 |
import vtk
from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase
from vtk.util import numpy_support
from timeit import default_timer as timer
import logging
class FilterClassifier(VTKPythonAlgorithmBase):
    """
    vtkAlgorithm with 2 inputs of vtkImageData and an output of vtkImageData
    Input: Depth images
    Output: Classified depth image

    Pixels where the two depth images agree (absolute difference below the
    classifier threshold) are overwritten with 1.0 in the output so that a
    downstream filter treats them as lying on the clipping plane.
    """
    def __init__(self, param_classifier_threshold=0.01):
        """
        :param param_classifier_threshold: default=0.01
        Threshold to determine when the difference in the depth images is too big
        and is therefore a novel measurement.
        :return:
        """
        VTKPythonAlgorithmBase.__init__(self,
                                        nInputPorts=2, inputType='vtkImageData',
                                        nOutputPorts=1, outputType='vtkImageData')
        self._param_classifier_threshold = param_classifier_threshold
        # Post-processing capture buffers; only filled in RequestData when
        # set_postprocess(True) has been called.
        self._postprocess = []
        self._postprocess_im1 = []
        self._postprocess_im2 = []
        self._postprocess_difim = []
    def set_postprocess(self, do_postprocess):
        # Enable/disable keeping copies of the intermediate images for
        # later retrieval via get_depth_images().
        self._postprocess = do_postprocess
    def get_depth_images(self):
        """
        Get the depth images. User has to call set_postprocess(True) first.
        :return: Depth images
        * return[0] - actual
        * return[1] - expected
        * return[2] - threshold absolute difference
        """
        return self._postprocess_im1, self._postprocess_im2, self._postprocess_difim
    def RequestInformation(self, request, inInfo, outInfo):
        # Report the output whole extent as the first input's update extent;
        # a dimension mismatch between inputs is logged but not fatal.
        logging.info('')
        # input images dimensions
        info = inInfo[0].GetInformationObject(0)
        ue1 = info.Get(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_EXTENT())
        info = inInfo[1].GetInformationObject(0)
        ue2 = info.Get(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_EXTENT())
        if ue1 != ue2:
            logging.warning('Input images have different dimensions. {} {}'.format(ue1, ue2))
        extent = ue1
        info = outInfo.GetInformationObject(0)
        info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(),
                 extent, len(extent))
        return 1
    def RequestData(self, request, inInfo, outInfo):
        # Compare the two depth images and emit the classified image.
        logging.info('')
        start = timer()
        # in images (vtkImageData)
        inp1 = vtk.vtkImageData.GetData(inInfo[0])
        inp2 = vtk.vtkImageData.GetData(inInfo[1])
        # convert to numpy arrays
        dim = inp1.GetDimensions()
        im1 = numpy_support.vtk_to_numpy(inp1.GetPointData().GetScalars())\
            .reshape(dim[1], dim[0])
        # NOTE(review): dimensions are re-read from inp1 here; presumably
        # inp2 was intended — harmless while both inputs share extents, but
        # confirm.
        dim = inp1.GetDimensions()
        im2 = numpy_support.vtk_to_numpy(inp2.GetPointData().GetScalars())\
            .reshape(dim[1], dim[0])
        # difference in the images
        # im1 is assumed to be from the actual sensor
        # im2 is what we expect to see based on the world mesh
        # Anywhere the difference is small, throw those measurements away
        # by setting them to one. By doing this FilterDepthImageToSurface
        # will assume they lie on the clipping plane and will remove them
        difim = abs(im1 - im2) < self._param_classifier_threshold
        if self._postprocess:
            # Copies taken before imout is mutated below.
            self._postprocess_im1 = im1.copy()
            self._postprocess_im2 = im2.copy()
            self._postprocess_difim = difim.copy()
        # NOTE(review): imout aliases im1 (no copy) — the assignment below
        # writes 1.0 through the numpy view into the first input's scalar
        # array; confirm this in-place mutation of the input is intended.
        imout = im1
        imout[difim] = 1.0
        info = outInfo.GetInformationObject(0)
        ue = info.Get(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_EXTENT())
        # output vtkImageData
        out = vtk.vtkImageData.GetData(outInfo)
        out.SetExtent(ue)
        # Forward the custom attributes attached to the input image.
        (out.sizex, out.sizey, out.tmat, out.viewport) = \
            (inp1.sizex, inp1.sizey, inp1.tmat, inp1.viewport)
        out.GetPointData().SetScalars(
            numpy_support.numpy_to_vtk(imout.reshape(-1)))
        end = timer()
        logging.info('Execution time {:.4f} seconds'.format(end - start))
        return 1
| lucasplus/MABDI | mabdi/FilterClassifier.py | Python | bsd-3-clause | 4,012 |
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: elasticache_parameter_group
short_description: Manage cache parameter groups in Amazon ElastiCache.
description:
  - Manage cache parameter groups in Amazon ElastiCache.
- Returns information about the specified cache cluster.
version_added: "2.3"
author: "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
- aws
- ec2
requirements: [ boto3, botocore ]
options:
group_family:
description:
- The name of the cache parameter group family that the cache parameter group can be used with.
Required when creating a cache parameter group.
choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']
type: str
name:
description:
- A user-specified name for the cache parameter group.
required: yes
type: str
description:
description:
- A user-specified description for the cache parameter group.
type: str
state:
description:
- Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed.
choices: ['present', 'absent', 'reset']
required: true
type: str
values:
description:
- A user-specified dictionary of parameters to reset or modify for the cache parameter group.
type: dict
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
---
- hosts: localhost
connection: local
tasks:
- name: 'Create a test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
group_family: 'redis3.2'
description: 'This is a cache parameter group'
state: 'present'
- name: 'Modify a test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
values:
activerehashing: yes
client-output-buffer-limit-normal-hard-limit: 4
state: 'present'
- name: 'Reset all modifiable parameters for the test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
state: reset
- name: 'Delete a test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
state: 'absent'
"""
RETURN = """
elasticache:
description: cache parameter group information and response metadata
returned: always
type: dict
sample:
cache_parameter_group:
cache_parameter_group_family: redis3.2
cache_parameter_group_name: test-please-delete
description: "initial description"
response_metadata:
http_headers:
content-length: "562"
content-type: text/xml
date: "Mon, 06 Feb 2017 22:14:08 GMT"
x-amzn-requestid: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
http_status_code: 200
request_id: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
retry_attempts: 0
changed:
description: if the cache parameter group has changed
returned: always
type: bool
sample:
changed: true
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict
from ansible.module_utils._text import to_text
from ansible.module_utils.six import string_types
import traceback
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def create(module, conn, name, group_family, description):
    """Create an ElastiCache cache parameter group.

    :returns: tuple of (API response, True) on success; on a client error
        the module fails out via fail_json.
    """
    try:
        result = conn.create_cache_parameter_group(
            CacheParameterGroupName=name,
            CacheParameterGroupFamily=group_family,
            Description=description,
        )
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to create cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    return result, True
def delete(module, conn, name):
    """Delete an ElastiCache cache parameter group.

    :returns: tuple of ({}, True) on success; on a client error the module
        fails out via fail_json.
    """
    try:
        conn.delete_cache_parameter_group(CacheParameterGroupName=name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to delete cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    # The delete call returns nothing useful, so report an empty response.
    return {}, True
def make_current_modifiable_param_dict(module, conn, name):
    """Return the current modifiable parameters of the group as a dict of
    {ParameterName: [AllowedValues, DataType, ParameterValue]}.
    """
    current_info = get_info(conn, name)
    if current_info is False:
        module.fail_json(msg="Could not connect to the cache parameter group %s." % name)
    modifiable_params = {}
    for param in current_info["Parameters"]:
        # Read-only parameters are excluded entirely.
        if not param["IsModifiable"]:
            continue
        modifiable_params[param["ParameterName"]] = [
            param.get("AllowedValues"),
            param["DataType"],
            param.get("ParameterValue"),
        ]
    return modifiable_params
def check_valid_modification(module, values, modifiable_params):
    """ Check if the parameters and values in values are valid.

    Coerces booleans into the representation ElastiCache expects
    ("yes"/"no" for string parameters, 1/0 for integer parameters),
    validates each value against the parameter's data type and allowed
    values, and reports whether anything differs from the current state.

    :returns: tuple of (changed_with_update, values) where values holds the
        coerced parameter values.
    """
    changed_with_update = False
    for parameter in values:
        new_value = values[parameter]
        # check valid modifiable parameters
        if parameter not in modifiable_params:
            module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys()))
        # check allowed datatype for modified parameters
        str_to_type = {"integer": int, "string": string_types}
        expected_type = str_to_type[modifiable_params[parameter][1]]
        if not isinstance(new_value, expected_type):
            # Fix: the original compared `expected_type == str`, which is
            # never true for string parameters because they map to the
            # `string_types` tuple, so bool/int values destined for string
            # parameters were always rejected via fail_json instead of
            # being coerced.
            if expected_type == string_types:
                if isinstance(new_value, bool):
                    values[parameter] = "yes" if new_value else "no"
                else:
                    values[parameter] = to_text(new_value)
            elif expected_type == int:
                if isinstance(new_value, bool):
                    values[parameter] = 1 if new_value else 0
                else:
                    module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
                                     (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
            else:
                module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
                                 (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
        # check allowed values for modifiable parameters
        choices = modifiable_params[parameter][0]
        if choices:
            if not (to_text(new_value) in choices or isinstance(new_value, int)):
                module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." %
                                 (new_value, parameter, choices))
        # check if a new value is different from current value
        if to_text(values[parameter]) != modifiable_params[parameter][2]:
            changed_with_update = True
    return changed_with_update, values
def check_changed_parameter_values(values, old_parameters, new_parameters):
    """Report whether any parameter value differs between the old and new
    snapshots.

    If the user specified particular parameters (values), only those are
    compared; otherwise every parameter in the old snapshot is checked.
    """
    keys_to_compare = values if values else old_parameters
    for parameter in keys_to_compare:
        if old_parameters[parameter] != new_parameters[parameter]:
            return True
    return False
def modify(module, conn, name, values):
    """Apply the given parameter values to the cache parameter group.

    :returns: the raw API response; on a client error the module fails out
        via fail_json.
    """
    # Translate {name: value} into the ParameterNameValues list shape the
    # API expects, stringifying every value.
    name_values = [
        {'ParameterName': key, 'ParameterValue': to_text(values[key])}
        for key in values
    ]
    try:
        response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=name_values)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to modify cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    return response
def reset(module, conn, name, values):
    """ Reset ElastiCache parameter group if the current information is different from the new information.

    Resets either the specific parameters given in *values* or, when no
    values are supplied, all parameters of the group.

    :returns: tuple of (API response, changed flag).
    """
    # Snapshot the current modifiable parameters so we can tell afterwards
    # whether the reset actually changed anything.
    old_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
    # determine whether to reset all or specific parameters
    # (fix: the original initialized format_parameters twice, once before
    # the branch and again inside it — the redundant assignment is gone)
    format_parameters = []
    if values:
        all_parameters = False
        for key in values:
            value = to_text(values[key])
            format_parameters.append({'ParameterName': key, 'ParameterValue': value})
    else:
        all_parameters = True
    try:
        response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to reset cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    # determine changed by comparing before/after snapshots
    new_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
    changed = check_changed_parameter_values(values, old_parameters_dict, new_parameters_dict)
    return response, changed
def get_info(conn, name):
    """Describe the ElastiCache parameter group.

    :returns: the describe_cache_parameters response, or False when the
        group does not exist or access is denied.
    """
    try:
        return conn.describe_cache_parameters(CacheParameterGroupName=name)
    except botocore.exceptions.ClientError:
        return False
def main():
    """Module entry point: create, modify, delete or reset an ElastiCache
    cache parameter group according to the requested state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']),
            name=dict(required=True, type='str'),
            description=dict(default='', type='str'),
            state=dict(required=True, choices=['present', 'absent', 'reset']),
            values=dict(type='dict'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO3:
        module.fail_json(msg='boto required for this module')
    parameter_group_family = module.params.get('group_family')
    parameter_group_name = module.params.get('name')
    group_description = module.params.get('description')
    state = module.params.get('state')
    values = module.params.get('values')
    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
    connection = boto3_conn(module, conn_type='client',
                            resource='elasticache', region=region,
                            endpoint=ec2_url, **aws_connect_kwargs)
    # exists is the describe response when the group is reachable, else False.
    exists = get_info(connection, parameter_group_name)
    # check that the needed requirements are available
    if state == 'present' and not (exists or parameter_group_family):
        module.fail_json(msg="Creating a group requires a family group.")
    elif state == 'reset' and not exists:
        module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name)
    # Taking action
    changed = False
    if state == 'present':
        if exists:
            # confirm that the group exists without any actions
            if not values:
                response = exists
                changed = False
            # modify existing group
            else:
                modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
                changed, values = check_valid_modification(module, values, modifiable_params)
                response = modify(module, connection, parameter_group_name, values)
        # create group
        else:
            response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description)
            if values:
                # Apply requested values immediately after creation.
                modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
                changed, values = check_valid_modification(module, values, modifiable_params)
                response = modify(module, connection, parameter_group_name, values)
    elif state == 'absent':
        if exists:
            # delete group
            response, changed = delete(module, connection, parameter_group_name)
        else:
            # Already absent: nothing to do, report unchanged.
            response = {}
            changed = False
    elif state == 'reset':
        response, changed = reset(module, connection, parameter_group_name, values)
    facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response))
    module.exit_json(**facts_result)
# Run the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
| simonwydooghe/ansible | lib/ansible/modules/cloud/amazon/elasticache_parameter_group.py | Python | gpl-3.0 | 14,264 |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_document_element_info
except ImportError:
bt_document_element_info = sys.modules[
"onshape_client.oas.models.bt_document_element_info"
]
class BTDocumentMergeInfo(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    OpenAPI model describing the result of a document merge: whether a
    library version mismatch occurred and which elements were overwritten.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-restricted attributes and no extra validations on this model.
    allowed_values = {}
    validations = {}
    # None here means no additional (undeclared) property types are declared
    # for this model — see the class docstring.
    additional_properties_type = None
    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
            and the value is attribute type.
        """
        return {
            "library_version_mismatch": (bool,),  # noqa: E501
            "overwritten_elements": (
                [bt_document_element_info.BTDocumentElementInfo],
            ),  # noqa: E501
        }
    @staticmethod
    def discriminator():
        # This model has no discriminator field (no polymorphic subtypes).
        return None
    # Maps python attribute names to their JSON keys in the API payload.
    attribute_map = {
        "library_version_mismatch": "libraryVersionMismatch",  # noqa: E501
        "overwritten_elements": "overwrittenElements",  # noqa: E501
    }
    @staticmethod
    def _composed_schemas():
        # Not a composed (allOf/oneOf/anyOf) schema.
        return None
    # Internal attributes that may be set directly (bypassing the data store).
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )
    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_document_merge_info.BTDocumentMergeInfo - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            library_version_mismatch (bool): [optional] # noqa: E501
            overwritten_elements ([bt_document_element_info.BTDocumentElementInfo]): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| onshape-public/onshape-clients | python/onshape_client/oas/models/bt_document_merge_info.py | Python | mit | 5,168 |
# coding=utf-8
from setuptools import setup, find_packages
# Package metadata for geventhttpclient-oauthlib: glue between
# geventhttpclient and oauthlib.
setup(
    name='geventhttpclient-oauthlib',
    version='0.1a',
    install_requires=[
        'geventhttpclient>=1.0a',
        'oauthlib>=0.3.0'],
    url='https://github.com/jneight/geventhttpclient-oauthlib',
    packages=find_packages(),
    include_package_data=True,
    license='BSD License',
    classifiers=[
        'Environment :: Web Environment',
        # NOTE(review): the Django classifier looks inherited from a
        # template — nothing in this package is Django-specific; confirm.
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    author='Javier Cordero',
    author_email='jcorderomartinez@gmail.com'
)
| jneight/geventhttpclient-oauthlib | setup.py | Python | bsd-3-clause | 799 |
# # -*- coding: utf-8 -*-
#
# # ##########################################################################
# ## Python code generated with wxFormBuilder (version Feb 26 2014)
# ## http://www.wxformbuilder.org/
# ##
# ## PLEASE DO "NOT" EDIT THIS FILE!
# ###########################################################################
#
# import wx
# import wx.xrc
# import wx.aui
#
# ###########################################################################
# ## Class pnlIntro
# ###########################################################################
#
# class pnlIntro(wx.Panel):
# # id=wxID_PNLINTRO, name=u'pnlIntro',
# # pos=wx.Point(536, 285), size=wx.Size(439, 357), style=wx.TAB_TRAVERSAL
# def __init__(self, parent):#, id, pos, size, style, name):
# #wx.Panel.__init__(self, parent=parent), id=id, pos=pos, size=size, style=style, name=name)
# wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 500,300 ), style = wx.TAB_TRAVERSAL )
#
# self.m_mgr = wx.aui.AuiManager()
# self.m_mgr.SetManagedWindow(self)
# self.m_mgr.SetFlags(wx.aui.AUI_MGR_DEFAULT)
#
# self.m_panel2 = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
# self.m_mgr.AddPane(self.m_panel2,
# wx.aui.AuiPaneInfo().Center().CaptionVisible(False).CloseButton(False).PaneBorder(
# False).Movable(False).Dock().Resizable().FloatingSize(wx.DefaultSize).BottomDockable(
# False).TopDockable(False).LeftDockable(False).RightDockable(False).Floatable(False))
#
# bSizer2 = wx.BoxSizer(wx.VERTICAL)
#
# self.lblHow = wx.StaticText(self.m_panel2, wx.ID_ANY, u"How would you like to save the series?",
# wx.DefaultPosition, wx.DefaultSize, 0)
# self.lblHow.Wrap(-1)
# bSizer2.Add(self.lblHow, 0, wx.ALL, 15)
#
# bSizer3 = wx.BoxSizer(wx.HORIZONTAL)
#
# self.m_panel3 = wx.Panel(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
# bSizer3.Add(self.m_panel3, 10, wx.EXPAND | wx.ALL, 5)
#
# self.rbSave = wx.RadioButton(self.m_panel2, wx.ID_ANY,
# u"Save (Save the data using the same Series Catalog Entry)", wx.DefaultPosition,
# wx.DefaultSize, wx.RB_GROUP)
# self.rbSave.SetValue(True)
# bSizer3.Add(self.rbSave, 90, wx.ALL, 5)
#
# bSizer2.Add(bSizer3, 1, wx.EXPAND, 5)
#
# bSizer4 = wx.BoxSizer(wx.HORIZONTAL)
#
# self.m_panel4 = wx.Panel(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
# bSizer4.Add(self.m_panel4, 10, wx.EXPAND | wx.ALL, 5)
#
# self.rbSaveAs = wx.RadioButton(self.m_panel2, wx.ID_ANY, u"Save As.. (Create a new Series Catalog Entry)",
# wx.DefaultPosition, wx.DefaultSize, 0)
# bSizer4.Add(self.rbSaveAs, 90, wx.ALL, 5)
#
# bSizer2.Add(bSizer4, 1, wx.EXPAND, 0)
#
# bSizer5 = wx.BoxSizer(wx.HORIZONTAL)
#
# self.m_panel5 = wx.Panel(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
# bSizer5.Add(self.m_panel5, 10, wx.EXPAND | wx.ALL, 5)
#
# self.rbSaveExisting = wx.RadioButton(self.m_panel2, wx.ID_ANY,
# u"Save As Existing.. (Save to an Existing Series Catalog Entry)",
# wx.DefaultPosition, wx.DefaultSize, 0)
# bSizer5.Add(self.rbSaveExisting, 90, wx.ALL, 5)
#
# bSizer2.Add(bSizer5, 1, wx.EXPAND, 5)
#
# bSizer2.AddSpacer(( 10, 150), 1, wx.EXPAND, 5)
#
# self.m_panel2.SetSizer(bSizer2)
# self.m_panel2.Layout()
# bSizer2.Fit(self.m_panel2)
#
# self.m_mgr.Update()
#
# # Connect Events
# self.rbSave.Bind(wx.EVT_RADIOBUTTON, self.OnBtnSaveRadiobutton)
# self.rbSaveAs.Bind(wx.EVT_RADIOBUTTON, self.OnBtnSaveAsRadiobutton)
# self.rbSaveExisting.Bind(wx.EVT_RADIOBUTTON, self.OnBtnSaveExistingRadiobuton)
#
# def __del__(self):
# self.m_mgr.UnInit()
#
#
# # Virtual event handlers, overide them in your derived class
# def OnBtnSaveRadiobutton(self, event):
# print "in parent"
# event.Skip()
#
# def OnBtnSaveAsRadiobutton(self, event):
# print "in parent"
# event.Skip()
#
# def OnBtnSaveExistingRadiobuton(self, event):
# print "in parent"
# event.Skip()
#
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 5 2014)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.aui
###########################################################################
## Class pnlIntro
###########################################################################
class pnlIntro(wx.Panel):
    """wxFormBuilder-generated intro panel offering the save choices:
    Save, Save As.. Existing Series, or Save As.. New Series."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.Size(500, 300),
                          style=wx.TAB_TRAVERSAL)
        # AUI manager hosts a single non-dockable, non-floatable center pane.
        self.m_mgr = wx.aui.AuiManager()
        self.m_mgr.SetManagedWindow(self)
        self.m_mgr.SetFlags(wx.aui.AUI_MGR_DEFAULT)
        self.m_panel2 = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
        self.m_mgr.AddPane(self.m_panel2,
                           wx.aui.AuiPaneInfo().Center().CaptionVisible(False).CloseButton(False).PaneBorder(
                               False).Movable(False).Dock().Resizable().FloatingSize(wx.DefaultSize).BottomDockable(
                               False).TopDockable(False).LeftDockable(False).RightDockable(False).Floatable(False))
        bSizer2 = wx.BoxSizer(wx.VERTICAL)
        self.lblHow = wx.StaticText(self.m_panel2, wx.ID_ANY, u"How would you like to save the series?",
                                    wx.DefaultPosition, wx.DefaultSize, 0)
        self.lblHow.Wrap(-1)
        bSizer2.Add(self.lblHow, 0, wx.ALL, 15)
        # Row: "Save" radio button (starts the radio group, selected).
        bSizer3 = wx.BoxSizer(wx.HORIZONTAL)
        self.m_panel3 = wx.Panel(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
        bSizer3.Add(self.m_panel3, 10, wx.EXPAND | wx.ALL, 5)
        self.rbSave = wx.RadioButton(self.m_panel2, wx.ID_ANY, u"Save ", wx.DefaultPosition, wx.DefaultSize,
                                     wx.RB_GROUP)
        self.rbSave.SetValue(True)
        bSizer3.Add(self.rbSave, 90, wx.ALL, 5)
        bSizer2.Add(bSizer3, 1, wx.EXPAND, 5)
        # Row: "Save As.." — despite the rb* name this is a StaticText
        # heading for the two options below, not a radio button.
        bSizer4 = wx.BoxSizer(wx.HORIZONTAL)
        self.m_panel4 = wx.Panel(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
        bSizer4.Add(self.m_panel4, 10, wx.EXPAND | wx.ALL, 5)
        self.rbSaveAsOption = wx.StaticText(self.m_panel2, wx.ID_ANY, u"Save As.. ", wx.DefaultPosition,
                                            wx.DefaultSize, 0)
        # self.rbSaveAsOption.Enable(False)
        bSizer4.Add(self.rbSaveAsOption, 90, wx.ALL, 5)
        bSizer2.Add(bSizer4, 1, wx.EXPAND, 0)
        # Row: "Existing Series" radio option (indented under Save As..).
        bSizer5 = wx.BoxSizer(wx.HORIZONTAL)
        self.m_panel5 = wx.Panel(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
        bSizer5.Add(self.m_panel5, 20, wx.EXPAND | wx.ALL, 5)
        self.rbSaveExisting = wx.RadioButton(self.m_panel2, wx.ID_ANY, u"Existing Series", wx.DefaultPosition,
                                             wx.DefaultSize, 0)
        bSizer5.Add(self.rbSaveExisting, 90, wx.ALL, 5)
        bSizer2.Add(bSizer5, 1, wx.EXPAND, 5)
        # Row: "New Series" radio option (indented under Save As..).
        bSizer51 = wx.BoxSizer(wx.HORIZONTAL)
        self.m_panel51 = wx.Panel(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
        bSizer51.Add(self.m_panel51, 20, wx.EXPAND | wx.ALL, 5)
        self.rbSaveAs = wx.RadioButton(self.m_panel2, wx.ID_ANY, u"New Series", wx.DefaultPosition, wx.DefaultSize, 0)
        bSizer51.Add(self.rbSaveAs, 90, wx.ALL, 5)
        bSizer2.Add(bSizer51, 1, wx.EXPAND, 5)
        bSizer2.AddSpacer(( 10, 150), 1, wx.EXPAND, 5)
        self.m_panel2.SetSizer(bSizer2)
        self.m_panel2.Layout()
        bSizer2.Fit(self.m_panel2)
        self.m_mgr.Update()
        # Connect Events
        self.rbSave.Bind(wx.EVT_RADIOBUTTON, self.OnBtnSaveRadiobutton)
        # NOTE(review): rbSaveAsOption is a StaticText; binding
        # EVT_RADIOBUTTON on it looks like dead code — confirm.
        self.rbSaveAsOption.Bind(wx.EVT_RADIOBUTTON, self.OnBtnSaveAsRadiobutton)
        # NOTE(review): both radio options share OnBtnSaveExistingRadiobuton;
        # presumably rbSaveAs was meant to use OnBtnSaveAsRadiobutton — confirm.
        self.rbSaveExisting.Bind(wx.EVT_RADIOBUTTON, self.OnBtnSaveExistingRadiobuton)
        self.rbSaveAs.Bind(wx.EVT_RADIOBUTTON, self.OnBtnSaveExistingRadiobuton)
    def __del__(self):
        self.m_mgr.UnInit()
    # Virtual event handlers, overide them in your derived class
    def OnBtnSaveRadiobutton(self, event):
        print "in parent"
        event.Skip()
    def OnBtnSaveAsRadiobutton(self, event):
        print "in parent"
        event.Skip()
    def OnBtnSaveExistingRadiobuton(self, event):
        print "in parent"
        event.Skip()
| ODM2/ODMToolsPython | odmtools/view/clsIntro.py | Python | bsd-3-clause | 9,250 |
import os
import sys
from setuptools import setup
from io import open
from zappa import __version__
# Long description for PyPI comes straight from the README.
with open('README.md') as readme_file:
    long_description = readme_file.read()
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
    if sys.version_info[0] == 2:
        required = f.read().splitlines()
    else:
        # This logic is intended to prevent the futures package from being installed in python 3 environments
        # as it can cause unexpected syntax errors in other packages. Futures is in the standard library in python 3
        # and is should never be installed in these environments.
        # Related: https://github.com/Miserlou/Zappa/issues/1179
        required = []
        for package in f.read().splitlines():
            if 'futures' not in package:
                required.append(package)
with open(os.path.join(os.path.dirname(__file__), 'test_requirements.txt')) as f:
    test_required = f.read().splitlines()
setup(
    name='zappa',
    version=__version__,
    packages=['zappa'],
    install_requires=required,
    tests_require=test_required,
    test_suite='nose.collector',
    include_package_data=True,
    license='MIT License',
    description='Server-less Python Web Services for AWS Lambda and API Gateway',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/Miserlou/Zappa',
    author='Rich Jones',
    author_email='rich@openwatch.net',
    # Both `zappa` and the short alias `z` launch the same CLI handler.
    entry_points={
        'console_scripts': [
            'zappa=zappa.cli:handle',
            'z=zappa.cli:handle',
        ]
    },
    classifiers=[
        'Environment :: Console',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
        'Framework :: Django',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.0',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| pjz/Zappa | setup.py | Python | mit | 2,143 |
import module
# unique to module
from urlparse import urlparse
import re
class Module(module.Module):

    def __init__(self, params):
        module.Module.__init__(self, params, query='SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL ORDER BY domain')
        self.register_option('limit', 0, True, 'limit total number of api requests (0 = unlimited)')
        self.info = {
            'Name': 'Bing API Hostname Enumerator',
            'Author': 'Marcus Watson (@BranMacMuffin)',
            'Description': 'Leverages the Bing API and "domain:" advanced search operator to harvest hosts. Updates the \'hosts\' table with the results.'
        }

    def module_run(self, domains):
        """Harvest hostnames for each domain via the Bing Search API.

        Fix: the original reused one variable (`new`) both as the running
        count of newly added hosts and as the per-iteration "found
        anything" flag, so the count passed to summarize() was clobbered
        to a boolean. The two roles are now separate variables.
        """
        limit = self.options['limit']
        requests = 0
        cnt = 0
        # total number of hosts newly added to the database
        new = 0
        for domain in domains:
            self.heading(domain, level=0)
            hosts = []
            results = []
            pages = 1
            base_query = '\'domain:%s' % (domain)
            while not limit or requests < limit:
                query = base_query
                # build query string based on api limitations
                for host in hosts:
                    omit_domain = ' -domain:%s' % (host)
                    if len(query) + len(omit_domain) < 1425:
                        query += omit_domain
                    else:
                        break
                query += '\''
                # make api requests
                if limit and requests + pages > limit:
                    pages = limit - requests
                last_len = len(results)
                results = self.search_bing_api(query, pages)
                requests += pages
                # iterate through results and add new hosts
                found = False
                for result in results:
                    host = urlparse(result['Url']).netloc
                    if not host in hosts and host != domain:
                        hosts.append(host)
                        self.output(host)
                        new += self.add_hosts(host)
                        found = True
                if not found and last_len == len(results):
                    # nothing new and no extra results: exhausted this domain
                    break
                elif not found and last_len != len(results):
                    pages += 1
                    self.verbose('No new hosts found for the current query. Increasing depth to \'%d\' pages.' % (pages))
            cnt += len(hosts)
        self.summarize(new, cnt)
| digistam/recon-ng | modules/recon/domains-hosts/bing_domain_api.py | Python | gpl-3.0 | 2,500 |
#!/usr/bin/python
# Translate Entrez gene ids to gene symbols and emit "symbol<TAB>KEGG-id"
# pairs.
#
# Usage: sym2kegg.py <entrez2symbol-file> <entrez2kegg-file>
#
# Fixes over the original: the second input file was never closed and both
# files are now managed with `with`; the work is wrapped in functions behind
# a __main__ guard so importing the module has no side effects.
import sys


def load_entrez_to_symbol(path):
    """Read a whitespace-delimited file (first line is a header) mapping
    Entrez id (column 0) -> gene symbol (column 1)."""
    mapping = {}
    with open(path) as handle:
        handle.readline()  # skip header
        for line in handle:
            fields = line.rstrip().split()
            mapping[fields[0]] = fields[1]
    return mapping


def emit_symbol_to_kegg(path, entrez2symbol):
    """Print "symbol<TAB>kegg" for each row of the Entrez-to-KEGG file
    (header skipped; Entrez id in column 1, KEGG id in column 2) whose
    gene has a known symbol."""
    with open(path) as handle:
        handle.readline()  # skip header
        for line in handle:
            fields = line.rstrip().split()
            if fields[1] in entrez2symbol:
                # single-argument print() is py2/py3 compatible
                print(entrez2symbol[fields[1]] + "\t" + fields[2])


if __name__ == '__main__':
    emit_symbol_to_kegg(sys.argv[2], load_entrez_to_symbol(sys.argv[1]))
| TravisCG/SI_scripts | sym2kegg.py | Python | gpl-3.0 | 355 |
import logging
import subprocess
LOGGER = logging.getLogger(__name__)


def execute(command):
    """Run *command* (an argv list) and return ``(stdout, stderr)`` as bytes.

    :param command: list of program + argument strings for subprocess.
    :raises subprocess.CalledProcessError: when the command exits non-zero.
        Fixes over the original: the exception now carries the captured
        stdout/stderr (the original stuffed a typo'd message, "Failed to
        execute comand", into the ``output`` slot and discarded both streams).
    """
    LOGGER.debug("Executing %s", ' '.join(command))
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(input=None)
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, command,
                                            output=stdout, stderr=stderr)
    LOGGER.debug("stdout %s", stdout)
    LOGGER.debug("stderr %s", stderr)
    return stdout, stderr
| EliRibble/mothermayi | mothermayi/process.py | Python | mit | 511 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import mock
import sys
import socket
from helpers import with_config
from luigi import notifications
from luigi.scheduler import Scheduler
from luigi.worker import Worker
from luigi import six
import luigi
class TestEmail(unittest.TestCase):
    """Tests for notifications._prefix subject prefixing."""
    def testEmailNoPrefix(self):
        # With no configured prefix the subject is returned unchanged.
        self.assertEqual("subject", notifications._prefix('subject'))
    @with_config({"email": {"prefix": "[prefix]"}})
    def testEmailPrefix(self):
        # A configured email.prefix is prepended, space-separated.
        self.assertEqual("[prefix] subject", notifications._prefix('subject'))
class TestException(Exception):
    """Exception raised by the test tasks below to trigger error emails."""
    pass
class TestTask(luigi.Task):
    """Base task with two parameters, used by the failure tasks below."""
    foo = luigi.Parameter()
    bar = luigi.Parameter()
class FailSchedulingTask(TestTask):
    """Task that raises during dependency resolution (scheduling phase)."""
    def requires(self):
        raise TestException('Oops!')
    def run(self):
        pass
    def complete(self):
        # Never complete, so the scheduler always attempts to schedule it.
        return False
class FailRunTask(TestTask):
    """Task that schedules fine but raises while running."""
    def run(self):
        raise TestException('Oops!')
    def complete(self):
        # Never complete, so the worker always runs (and fails) it.
        return False
class ExceptionFormatTest(unittest.TestCase):
    """Verifies that task failures produce exactly one error email whose
    subject and body (plain text or HTML) contain the task name, its
    parameters and the exception message."""
    def setUp(self):
        self.sch = Scheduler()
    def test_fail_run(self):
        task = FailRunTask(foo='foo', bar='bar')
        self._run_task(task)
    def test_fail_run_html(self):
        task = FailRunTask(foo='foo', bar='bar')
        self._run_task_html(task)
    def test_fail_schedule(self):
        task = FailSchedulingTask(foo='foo', bar='bar')
        self._run_task(task)
    def test_fail_schedule_html(self):
        task = FailSchedulingTask(foo='foo', bar='bar')
        self._run_task_html(task)
    @with_config({'email': {'receiver': 'nowhere@example.com',
                            'prefix': '[TEST] '}})
    @mock.patch('luigi.notifications.send_error_email')
    def _run_task(self, task, mock_send):
        # Run the task to failure and check one plain-text email was built.
        with Worker(scheduler=self.sch) as w:
            w.add(task)
            w.run()
        self.assertEqual(mock_send.call_count, 1)
        args, kwargs = mock_send.call_args
        self._check_subject(args[0], task)
        self._check_body(args[1], task, html=False)
    @with_config({'email': {'receiver': 'nowhere@axample.com',
                            'prefix': '[TEST] ',
                            'format': 'html'}})
    @mock.patch('luigi.notifications.send_error_email')
    def _run_task_html(self, task, mock_send):
        # Same as _run_task, but with the email format configured as HTML.
        with Worker(scheduler=self.sch) as w:
            w.add(task)
            w.run()
        self.assertEqual(mock_send.call_count, 1)
        args, kwargs = mock_send.call_args
        self._check_subject(args[0], task)
        self._check_body(args[1], task, html=True)
    def _check_subject(self, subject, task):
        self.assertIn(str(task), subject)
    def _check_body(self, body, task, html=False):
        if html:
            self.assertIn('<th>name</th><td>{}</td>'.format(task.task_family), body)
            self.assertIn('<div class="highlight"', body)
            self.assertIn('Oops!', body)
            for param, value in task.param_kwargs.items():
                self.assertIn('<th>{}</th><td>{}</td>'.format(param, value), body)
        else:
            self.assertIn('Name: {}\n'.format(task.task_family), body)
            self.assertIn('Parameters:\n', body)
            self.assertIn('TestException: Oops!', body)
            for param, value in task.param_kwargs.items():
                self.assertIn('{}: {}\n'.format(param, value), body)
    @with_config({"email": {"receiver": "a@a.a"}})
    def testEmailRecipients(self):
        # The configured receiver is always included; extras are merged in.
        six.assertCountEqual(self, notifications._email_recipients(), ["a@a.a"])
        six.assertCountEqual(self, notifications._email_recipients("b@b.b"), ["a@a.a", "b@b.b"])
        six.assertCountEqual(self, notifications._email_recipients(["b@b.b", "c@c.c"]),
                             ["a@a.a", "b@b.b", "c@c.c"])
    @with_config({"email": {}}, replace_sections=True)
    def testEmailRecipientsNoConfig(self):
        # Without a configured receiver, only explicit recipients remain.
        six.assertCountEqual(self, notifications._email_recipients(), [])
        six.assertCountEqual(self, notifications._email_recipients("a@a.a"), ["a@a.a"])
        six.assertCountEqual(self, notifications._email_recipients(["a@a.a", "b@b.b"]),
                             ["a@a.a", "b@b.b"])
class NotificationFixture(object):
    """
    Defines API and message fixture.
    config, sender, subject, message, recipients, image_png
    """
    # Fixture values, passed positionally to the send_email_* functions
    # via notification_args below.
    sender = 'luigi@unittest'
    subject = 'Oops!'
    message = """A multiline
message."""
    recipients = ['noone@nowhere.no', 'phantom@opera.fr']
    image_png = None
    notification_args = [sender, subject, message, recipients, image_png]
    # Pre-rendered MIME message returned by the mocked generate_email().
    mocked_email_msg = '''Content-Type: multipart/related; boundary="===============0998157881=="
MIME-Version: 1.0
Subject: Oops!
From: luigi@unittest
To: noone@nowhere.no,phantom@opera.fr
--===============0998157881==
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Type: text/plain; charset="utf-8"
A multiline
message.
--===============0998157881==--'''
class TestSMTPEmail(unittest.TestCase, NotificationFixture):
    """
    Tests sending SMTP email.
    """
    def setUp(self):
        # Replace the real smtplib with a MagicMock for every test.
        sys.modules['smtplib'] = mock.MagicMock()
        import smtplib  # noqa: F401
    def tearDown(self):
        del sys.modules['smtplib']
    @with_config({"smtp": {"ssl": "False",
                           "host": "my.smtp.local",
                           "port": "999",
                           "local_hostname": "ptms",
                           "timeout": "1200",
                           "username": "Robin",
                           "password": "dooH",
                           "no_tls": "False"}})
    def test_sends_smtp_email(self):
        """
        Call notifications.send_email_smtp with fixture parameters with smtp_without_tls set to False
        and check that sendmail is properly called.
        """
        smtp_kws = {"host": "my.smtp.local",
                    "port": 999,
                    "local_hostname": "ptms",
                    "timeout": 1200}
        with mock.patch('smtplib.SMTP') as SMTP:
            with mock.patch('luigi.notifications.generate_email') as generate_email:
                generate_email.return_value\
                    .as_string.return_value = self.mocked_email_msg
                notifications.send_email_smtp(*self.notification_args)
                SMTP.assert_called_once_with(**smtp_kws)
                SMTP.return_value.login.assert_called_once_with("Robin", "dooH")
                SMTP.return_value.starttls.assert_called_once_with()
                SMTP.return_value.sendmail\
                    .assert_called_once_with(self.sender, self.recipients,
                                             self.mocked_email_msg)
    @with_config({"smtp": {"ssl": "False",
                           "host": "my.smtp.local",
                           "port": "999",
                           "local_hostname": "ptms",
                           "timeout": "1200",
                           "username": "Robin",
                           "password": "dooH",
                           "no_tls": "True"}})
    def test_sends_smtp_email_without_tls(self):
        """
        Call notifications.send_email_smtp with fixture parameters with no_tls set to True
        and check that sendmail is properly called without also calling
        starttls.
        """
        smtp_kws = {"host": "my.smtp.local",
                    "port": 999,
                    "local_hostname": "ptms",
                    "timeout": 1200}
        with mock.patch('smtplib.SMTP') as SMTP:
            with mock.patch('luigi.notifications.generate_email') as generate_email:
                generate_email.return_value \
                    .as_string.return_value = self.mocked_email_msg
                notifications.send_email_smtp(*self.notification_args)
                SMTP.assert_called_once_with(**smtp_kws)
                self.assertEqual(SMTP.return_value.starttls.called, False)
                SMTP.return_value.login.assert_called_once_with("Robin", "dooH")
                SMTP.return_value.sendmail \
                    .assert_called_once_with(self.sender, self.recipients,
                                             self.mocked_email_msg)
    @with_config({"smtp": {"ssl": "False",
                           "host": "my.smtp.local",
                           "port": "999",
                           "local_hostname": "ptms",
                           "timeout": "1200",
                           "username": "Robin",
                           "password": "dooH",
                           "no_tls": "True"}})
    def test_sends_smtp_email_exceptions(self):
        """
        Call notifications.send_email_smtp when it cannot connect to smtp server (socket.error)
        starttls.
        """
        smtp_kws = {"host": "my.smtp.local",
                    "port": 999,
                    "local_hostname": "ptms",
                    "timeout": 1200}
        with mock.patch('smtplib.SMTP') as SMTP:
            with mock.patch('luigi.notifications.generate_email') as generate_email:
                SMTP.side_effect = socket.error()
                generate_email.return_value \
                    .as_string.return_value = self.mocked_email_msg
                try:
                    notifications.send_email_smtp(*self.notification_args)
                # NOTE(review): the failure message below has a typo
                # ("expection"); it is a runtime string, so left unchanged here.
                except socket.error:
                    self.fail("send_email_smtp() raised expection unexpectedly")
            # NOTE(review): these assertions run after the generate_email
            # patch has been reverted, so notifications.generate_email is the
            # real function again -- '.called' likely isn't defined on it.
            # Also, 'SMTP.sendemail' looks like a typo for
            # 'SMTP.return_value.sendmail'; as written the mock auto-creates
            # an attribute that is trivially never called. Confirm intent.
            SMTP.assert_called_once_with(**smtp_kws)
            self.assertEqual(notifications.generate_email.called, False)
            self.assertEqual(SMTP.sendemail.called, False)
class TestSendgridEmail(unittest.TestCase, NotificationFixture):
    """
    Tests sending Sendgrid email.
    """
    def setUp(self):
        # Replace the real sendgrid package with a MagicMock for every test.
        sys.modules['sendgrid'] = mock.MagicMock()
        import sendgrid  # noqa: F401
    def tearDown(self):
        del sys.modules['sendgrid']
    @with_config({"sendgrid": {"username": "Nikola",
                               "password": "jahuS"}})
    def test_sends_sendgrid_email(self):
        """
        Call notifications.send_email_sendgrid with fixture parameters
        and check that SendGridClient is properly called.
        """
        with mock.patch('sendgrid.SendGridClient') as SendgridClient:
            notifications.send_email_sendgrid(*self.notification_args)
            SendgridClient.assert_called_once_with("Nikola", "jahuS", raise_errors=True)
            self.assertTrue(SendgridClient.return_value.send.called)
class TestSESEmail(unittest.TestCase, NotificationFixture):
    """
    Tests sending email through AWS SES.
    """
    def setUp(self):
        # Replace boto3 with a MagicMock for every test.
        sys.modules['boto3'] = mock.MagicMock()
        import boto3  # noqa: F401
    def tearDown(self):
        del sys.modules['boto3']
    @with_config({})
    def test_sends_ses_email(self):
        """
        Call notifications.send_email_ses with fixture parameters
        and check that boto is properly called.
        """
        with mock.patch('boto3.client') as boto_client:
            with mock.patch('luigi.notifications.generate_email') as generate_email:
                generate_email.return_value\
                    .as_string.return_value = self.mocked_email_msg
                notifications.send_email_ses(*self.notification_args)
                SES = boto_client.return_value
                SES.send_raw_email.assert_called_once_with(
                    Source=self.sender,
                    Destinations=self.recipients,
                    RawMessage={'Data': self.mocked_email_msg})
class TestSNSNotification(unittest.TestCase, NotificationFixture):
    """
    Tests sending email through AWS SNS.
    """
    def setUp(self):
        # Replace boto3 with a MagicMock for every test.
        sys.modules['boto3'] = mock.MagicMock()
        import boto3  # noqa: F401
    def tearDown(self):
        del sys.modules['boto3']
    @with_config({})
    def test_sends_sns_email(self):
        """
        Call notifications.send_email_sns with fixture parameters
        and check that boto3 is properly called.
        """
        with mock.patch('boto3.resource') as res:
            notifications.send_email_sns(*self.notification_args)
            SNS = res.return_value
            # The first recipient is used as the SNS topic ARN.
            SNS.Topic.assert_called_once_with(self.recipients[0])
            SNS.Topic.return_value.publish.assert_called_once_with(
                Subject=self.subject, Message=self.message)
    @with_config({})
    def test_sns_subject_is_shortened(self):
        """
        Call notifications.send_email_sns with a too-long Subject (more than
        100 chars) and check that it is cut to a length of 100 chars.
        """
        long_subject = 'Luigi: SanityCheck(regexPattern=aligned-source\\|data-not-older\\|source-chunks-compl,'\
                       'mailFailure=False, mongodb=mongodb://localhost/stats) FAILED'
        with mock.patch('boto3.resource') as res:
            notifications.send_email_sns(self.sender, long_subject, self.message,
                                         self.recipients, self.image_png)
            SNS = res.return_value
            SNS.Topic.assert_called_once_with(self.recipients[0])
            called_subj = SNS.Topic.return_value.publish.call_args[1]['Subject']
            self.assertTrue(len(called_subj) <= 100,
                            "Subject can be max 100 chars long! Found {}.".format(len(called_subj)))
class TestNotificationDispatcher(unittest.TestCase, NotificationFixture):
    """
    Test dispatching of notifications on configuration values.
    """
    def check_dispatcher(self, target):
        """
        Call notifications.send_email and test that the proper
        function was called.
        """
        expected_args = self.notification_args
        with mock.patch('luigi.notifications.{}'.format(target)) as sender:
            notifications.send_email(self.subject, self.message, self.sender,
                                     self.recipients, image_png=self.image_png)
            self.assertTrue(sender.called)
            call_args = sender.call_args[0]
            # NOTE: expected_args is ordered (sender, subject, message,
            # recipients, image_png) while send_email is called as (subject,
            # message, sender, ...) -- send_email evidently normalizes the
            # order before dispatching to the backend function.
            self.assertEqual(tuple(expected_args), call_args)
    @with_config({'email': {'force_send': 'True',
                            'method': 'smtp'}})
    def test_smtp(self):
        return self.check_dispatcher('send_email_smtp')
    @with_config({'email': {'force_send': 'True',
                            'method': 'ses'}})
    def test_ses(self):
        return self.check_dispatcher('send_email_ses')
    @with_config({'email': {'force_send': 'True',
                            'method': 'sendgrid'}})
    def test_sendgrid(self):
        return self.check_dispatcher('send_email_sendgrid')
    @with_config({'email': {'force_send': 'True',
                            'method': 'sns'}})
    def test_sns(self):
        return self.check_dispatcher('send_email_sns')
| stroykova/luigi | test/notifications_test.py | Python | apache-2.0 | 15,542 |
# Uses python3
import sys
def binary_search(a, x):
    """Return an index of x in the sorted list a, or -1 if x is absent.

    Bug fixes vs. the original: it compared x against the *indices*
    (``x == mid`` / ``left == x``) instead of the list elements, and used
    true division ``/2`` which yields a float index under Python 3.
    Runs in O(log n).
    """
    left, right = 0, len(a)  # half-open search interval [left, right)
    while left < right:
        mid = left + (right - left) // 2
        if a[mid] == x:
            return mid
        if a[mid] < x:
            left = mid + 1
        else:
            right = mid
    return -1
def linear_search(a, x):
    """Return the index of the first occurrence of x in a, or -1 if absent."""
    for index, value in enumerate(a):
        if value == x:
            return index
    return -1
if __name__ == '__main__':
    # Input format: n, then n sorted values, then the number of queries,
    # then the query values themselves.
    # Fix: the original bound sys.stdin.read() to a variable named 'input',
    # shadowing the builtin; the stale "replace with ... when implemented"
    # comment is also gone since binary_search is already called.
    data = list(map(int, sys.stdin.read().split()))
    n = data[0]
    m = data[n + 1]  # declared number of queries (queries are read to EOF)
    a = data[1: n + 1]
    for x in data[n + 2:]:
        print(binary_search(a, x), end=' ')
| euccas/CodingPuzzles-Python | course/ucsd_algorithm_toolbox/divide_and_conquer_starter_files/binary_search/binary_search.py | Python | mit | 815 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
	"""Register the 'frenzied_graul' mobile template with the spawn service."""
	mobileTemplate = MobileTemplate()
	mobileTemplate.setCreatureName('frenzied_graul')
	mobileTemplate.setLevel(72)
	mobileTemplate.setDifficulty(Difficulty.NORMAL)
	mobileTemplate.setMinSpawnDistance(5)
	mobileTemplate.setMaxSpawnDistance(10)
	mobileTemplate.setDeathblow(True)
	mobileTemplate.setScale(1)
	# Harvestable resources dropped by this creature.
	mobileTemplate.setMeatType("Carnivore Meat")
	mobileTemplate.setMeatAmount(1025)
	mobileTemplate.setHideType("Leathery Hide")
	mobileTemplate.setBoneAmount(950)
	mobileTemplate.setBoneType("Animal Bone")
	mobileTemplate.setHideAmount(855)
	mobileTemplate.setSocialGroup("graul")
	mobileTemplate.setAssistRange(12)
	mobileTemplate.setStalker(False)
	mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
	# Visual appearance object template(s).
	templates = Vector()
	templates.add('object/mobile/shared_slinking_voritor_hunter.iff')
	mobileTemplate.setTemplates(templates)
	# Default unarmed melee weapon dealing kinetic damage.
	weaponTemplates = Vector()
	weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
	weaponTemplates.add(weapontemplate)
	mobileTemplate.setWeaponTemplateVector(weaponTemplates)
	# Special creature attacks available to the AI.
	attacks = Vector()
	attacks.add('bm_dampen_pain_5')
	attacks.add('bm_shaken_3')
	attacks.add('bm_stomp_5')
	mobileTemplate.setDefaultAttack('creatureMeleeAttack')
	mobileTemplate.setAttacks(attacks)
	core.spawnService.addMobileTemplate('frenzied_graul', mobileTemplate)
	return
from __future__ import with_statement
import os, sys, re, pynav, time, datetime, pytz ,pyaeso, spharm, matplotlib, xml_marshaller, xmlbuilder
import numpy as np
from pynav import Pynav
from xml_marshaller import xml_marshaller
from xml_marshaller.xml_marshaller import *
from xmlbuilder import XMLBuilder
from PyNOAAGeoMagIndiceHandler import decorator
from decorator import DictAssign
"""
Sample Of Ozone NOAA Data received:
111 1975 01 23 12 22.4
111 1975 01 23 13 21.4
111 1975 01 23 14 20.9
111 1975 01 23 15 20.9
111 1975 01 23 16 20.9
111 1975 01 23 17 20.9
111 1975 01 23 18 21.4
111 1975 01 23 19 21.9
111 1975 01 23 20 22.4
111 1975 01 23 22 22.9
111 1975 01 23 23 22.9
111 1975 01 23 24 22.4
"""
class NOAADataReference( object ):
  """Namespace bundling the static NOAA data-source reference structures.

  NOTE(review): the key 'localtion' (sic) appears to be a misspelling of
  'location', but it is a runtime dictionary key that may be referenced by
  name elsewhere, so it is left untouched here.
  """
  class NOAADataReferenceImpl( object ):
    """Static description of where NOAA surface-ozone data lives.

    DictReference describes the schema/vocabulary of NOAADictCollector;
    NOAADictCollector maps system -> location -> site code -> detector
    ('ozone') -> lapse ('1h') -> FTP url of the hourly data file.
    """
    # Schema describing the nesting levels and allowed values of the
    # collector dictionary below.
    DictReference={
      'field':{
        'name':'dict',
        'value':[ 'NOAADictCollector' ],
        'dict':{
          'name':'position',
          'value':[ 'system' ],
          'position':{
            'name':'localtion',
            'value':[ 'earth','sonde','satellite' ], },
          'localtion':{
            'name':'site',
            'value':[ 'spo','sum','thd','smo','rpb','nwr','mlo','ice','brw','bmw','arh' ] },
          'site':{
            'name':'detector',
            'value':['ozone'],
            'detector':{
              'name':['stringfield','listfield','collectionfield'],
              'value':[ 'title','field','laps','url','1m','5m','1h','12h','24h','1w','1m','1y','2y' ],
              'stringfield':{
                'name':'str',
                'value':[ 'title', 'url'] },
              'listfield':{
                'name':'list',
                'value':['field'] },
              'collectionfield':{
                'name':'dict',
                'value':['laps','1m','5m','1h','12h','24h','1w','1m','1y','2y'] }
            }
          }
        }
      }
    }
    # Concrete FTP locations of hourly surface-ozone data per site code.
    NOAADictCollector={
      'system':{
        'earth':{
          'spo':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/south_pole/spclsoz' } } } },
          'sum':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/summit_greenland/sutclsoz' } } } },
          'thd':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/trinidad_head/thtclsoz' } } } },
          'smo':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/samoa/smclsoz' } } } },
          'rpb':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/barbados/baclsoz' } } } },
          'nwr':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/niwot_ridge/nwclsoz' } } } },
          'mlo':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/mauna_loa/mlclsoz' } } } },
          'ice':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/westman_iceland/vmclsoz' } } } },
          'brw':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/barrow/brclsoz' } } } },
          'bmw':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/bermuda/bmclsoz' } } } },
          'arh':{
            'ozone':{
              'laps':{
                '1h':{
                  'url':'ftp://ftp.cmdl.noaa.gov/ozwv/surfo3/arrival_heights/ahclsoz' } } } } } } }
  # Shared singleton-style instance of the implementation container.
  NodeImpl=NOAADataReferenceImpl()
class NOAADataReferenceFactory( object ):
    """Factory/registry of allocated NOAA data-reference records.

    Bug fixes vs. the original: ``GetID`` returned ``self.Name`` instead of
    ``self.ID``, and ``__init__`` wrote attributes onto a nonexistent
    ``self.Data`` object instead of onto the instance itself.

    NOTE(review): DictReference, DictFactory and CurrentID are class-level
    attributes, so all instances share (and mutate) the same registry.
    """
    # Template record copied for every new allocation.
    DictReference = {'name': None, 'value': []}
    # Registry of allocated records, keyed by a monotonically growing id.
    DictFactory = {}
    CurrentID = 0
    NewAllocName = None
    NewAllocValue = None
    NewAllocContent = None

    def GetNewAlloc(self):
        return self.NewAllocContent

    def SetNewAlloc(self, value):
        # value is a (name, content) pair; allocate a fresh record for it.
        self.NewAllocName, self.NewAllocContent = value
        self.CurrentID += 1
        self.NewDict = {self.CurrentID: dict(self.DictReference)}
        self.NewDict[self.CurrentID]['name'] = self.NewAllocName
        self.NewDict[self.CurrentID]['value'] = self.NewAllocContent
        self.DictFactory.update(self.NewDict)

    PropertyNewAlloc = property(GetNewAlloc, SetNewAlloc)

    ID = None

    def GetID(self):
        # Fix: originally returned self.Name, making PropertyID useless.
        return self.ID

    def SetID(self, value):
        self.ID = value

    PropertyID = property(GetID, SetID)

    Name = None

    def GetName(self):
        return self.Name

    def SetName(self, value):
        self.Name = value

    PropertyName = property(GetName, SetName)

    Value = None

    def GetValue(self):
        return self.Value

    def SetValue(self, value):
        self.Value = value

    PropertyValue = property(GetValue, SetValue)

    def __init__(self, **Kargs):
        # Fix: originally did setattr(self.Data, ...) but no 'Data' attribute
        # exists; keyword arguments become attributes of the instance.
        for ItemKey in Kargs.keys():
            setattr(self, ItemKey, Kargs[ItemKey])
# Smoke test: instantiate the reference container when run as a script.
if __name__.__eq__( '__main__' ):
    ObjNOAATest=NOAADataReference()
class TreeNode(object):
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    """LeetCode 129: sum every number formed along a root-to-leaf path."""

    def sumNumbers(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        def total_from(node, prefix):
            # prefix is the number formed by the digits on the path so far.
            if node is None:
                return 0
            prefix = prefix * 10 + node.val
            if node.left is None and node.right is None:
                return prefix  # leaf: the completed path number
            return total_from(node.left, prefix) + total_from(node.right, prefix)

        return total_from(root, 0)
| ChuanleiGuo/AlgorithmsPlayground | LeetCodeSolutions/python/129_Sum_Root_to_Leaf_Numbers.py | Python | mit | 648 |
# -*- coding: utf-8 -*-
"""
test.t_controlbeast.t_ssh.test_CbSSHApi
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2013 by the ControlBeast team, see AUTHORS.
:license: ISC, see LICENSE for details.
"""
from unittest import TestCase
from controlbeast.ssh.api import CbSSHLib
class TestCbSSHApi(TestCase):
    """Unit tests for the CbSSHLib singleton SSH API.

    Covered cases: obtaining an instance via ``get_instance``, direct
    construction being forbidden, and singleton identity across repeated
    ``get_instance`` calls.
    """

    def test_01(self):
        """``get_instance`` must yield a :py:class:`CbSSHLib` instance."""
        instance = CbSSHLib.get_instance()
        self.assertIsInstance(instance, CbSSHLib)

    def test_02(self):
        """Direct construction must raise :py:exc:`TypeError`."""
        with self.assertRaises(TypeError):
            obj = CbSSHLib()

    def test_03(self):
        """Two ``get_instance`` results must compare equal (same singleton)."""
        first = CbSSHLib.get_instance()
        second = CbSSHLib.get_instance()
        self.assertEqual(first, second)
| daemotron/controlbeast | test/t_controlbeast/t_ssh/test_CbSSHApi.py | Python | isc | 1,963 |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import logging
import random
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import six.moves.urllib.parse as urlparse
from ironic.common import exception
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_GLANCE_API_SERVER = None
""" iterator that cycles (indefinitely) over glance API servers. """
def generate_glance_url():
    """Generate the URL to glance."""
    conf = CONF.glance
    return "{}://{}:{:d}".format(conf.glance_protocol,
                                 conf.glance_host,
                                 conf.glance_port)
def generate_image_url(image_ref):
    """Generate an image URL from an image_ref."""
    base_url = generate_glance_url()
    return "{}/images/{}".format(base_url, image_ref)
def _extract_attributes(image):
    """Copy whitelisted image attributes (and custom properties) to a dict."""
    # Whitelisted v1 image attributes copied onto the output dict.
    IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
                        'container_format', 'checksum', 'id',
                        'name', 'created_at', 'updated_at',
                        'deleted_at', 'deleted', 'status',
                        'min_disk', 'min_ram', 'is_public']
    # Extra attributes only present in the glance v2 image schema.
    IMAGE_ATTRIBUTES_V2 = ['tags', 'visibility', 'protected',
                           'file', 'schema']
    output = {}
    for attr in IMAGE_ATTRIBUTES:
        output[attr] = getattr(image, attr, None)
    output['properties'] = getattr(image, 'properties', {})
    # v2 images advertise their schema; extend the whitelist and copy the
    # v2-only attributes as well.
    if hasattr(image, 'schema') and 'v2' in image['schema']:
        IMAGE_ATTRIBUTES = IMAGE_ATTRIBUTES + IMAGE_ATTRIBUTES_V2
        for attr in IMAGE_ATTRIBUTES_V2:
            output[attr] = getattr(image, attr, None)
        output['schema'] = image['schema']
    # Anything not whitelisted is treated as a custom image property.
    for image_property in set(image.keys()) - set(IMAGE_ATTRIBUTES):
        output['properties'][image_property] = image[image_property]
    return output
def _convert_timestamps_to_datetimes(image_meta):
"""Returns image with timestamp fields converted to datetime objects."""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
return image_meta
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(metadata, method):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
prop = properties[attr]
if method == 'from':
if isinstance(prop, six.string_types):
properties[attr] = jsonutils.loads(prop)
if method == 'to':
if not isinstance(prop, six.string_types):
properties[attr] = jsonutils.dumps(prop)
return metadata
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _get_api_server_iterator():
    """Return iterator over shuffled API servers.
    Shuffle a list of CONF.glance.glance_api_servers and return an iterator
    that will cycle through the list, looping around to the beginning if
    necessary.
    If CONF.glance.glance_api_servers isn't set, we fall back to using this
    as the server: CONF.glance.glance_host:CONF.glance.glance_port.
    :returns: iterator that cycles (indefinitely) over shuffled glance API
              servers. The iterator returns tuples of (host, port, use_ssl).
    """
    api_servers = []
    configured_servers = (CONF.glance.glance_api_servers or
                          ['%s:%s' % (CONF.glance.glance_host,
                                      CONF.glance.glance_port)])
    for api_server in configured_servers:
        # Allow bare "host:port" entries by prepending the configured protocol.
        if '//' not in api_server:
            api_server = '%s://%s' % (CONF.glance.glance_protocol, api_server)
        url = urlparse.urlparse(api_server)
        # NOTE(review): the port falls back to 80 even for https URLs without
        # an explicit port -- confirm that this is intended.
        port = url.port or 80
        host = url.netloc.split(':', 1)[0]
        use_ssl = (url.scheme == 'https')
        api_servers.append((host, port, use_ssl))
    random.shuffle(api_servers)
    return itertools.cycle(api_servers)
def _get_api_server():
    """Return a Glance API server.
    :returns: for an API server, the tuple (host-or-IP, port, use_ssl), where
        use_ssl is True to use the 'https' scheme, and False to use 'http'.
    """
    global _GLANCE_API_SERVER
    # Lazily create the shuffled round-robin iterator on first use.
    if not _GLANCE_API_SERVER:
        _GLANCE_API_SERVER = _get_api_server_iterator()
    return six.next(_GLANCE_API_SERVER)
def parse_image_ref(image_href):
    """Parse an image href into composite parts.

    :param image_href: href of an image (bare id, glance:// ref, or URL)
    :returns: a tuple of the form (image_id, host, port, use_ssl)
    :raises: exception.InvalidImageRef if the href cannot be parsed
    """
    if '/' not in str(image_href):
        # Bare image ID: pair it with a configured glance API server.
        host, port, use_ssl = _get_api_server()
        return (image_href, host, port, use_ssl)
    try:
        url = urlparse.urlparse(image_href)
        if url.scheme == 'glance':
            # glance:// ref: the id is the last path component; the server
            # comes from configuration.
            host, port, use_ssl = _get_api_server()
            image_id = image_href.split('/')[-1]
        else:
            # Plain http(s) URL: everything is encoded in the href itself.
            port = url.port or 80
            host = url.netloc.split(':', 1)[0]
            image_id = url.path.split('/')[-1]
            use_ssl = (url.scheme == 'https')
        return (image_id, host, port, use_ssl)
    except ValueError:
        raise exception.InvalidImageRef(image_href=image_href)
def extract_query_params(params, version):
    """Whitelist glance query parameters and apply per-version defaults."""
    accepted = ('filters', 'marker', 'limit', 'sort_key', 'sort_dir')
    _params = {key: params[key] for key in accepted if params.get(key)}
    # ensure filters is a dict
    _params.setdefault('filters', {})
    # NOTE(vish): don't filter out private images
    # NOTE(ghe): in v2, not passing any visibility doesn't filter private images
    if version == 1:
        _params['filters'].setdefault('is_public', 'none')
    return _params
def translate_to_glance(image_meta):
    """Prepare image metadata for submission to glance (serialize JSON
    properties and strip server-managed read-only fields)."""
    converted = _convert(image_meta, 'to')
    return _remove_read_only(converted)
def translate_from_glance(image):
    """Convert a glance image object into a plain metadata dict with
    datetime timestamps and deserialized JSON properties."""
    meta = _extract_attributes(image)
    meta = _convert_timestamps_to_datetimes(meta)
    return _convert(meta, 'from')
def is_image_available(context, image):
    """Check image availability.

    This check is needed in case Nova and Glance are deployed
    without authentication turned on.
    """
    # An auth token implies an authenticated request; no extra checks needed.
    if getattr(context, 'auth_token', None):
        return True
    if image.is_public or context.is_admin:
        return True
    properties = image.properties
    # Ownership may be recorded under either property name; the first one
    # present decides.
    for owner_key in ('owner_id', 'project_id'):
        if context.project_id and owner_key in properties:
            return str(properties[owner_key]) == str(context.project_id)
    try:
        user_id = properties['user_id']
    except KeyError:
        return False
    return str(user_id) == str(context.user_id)
def is_glance_image(image_href):
    """Return True when image_href looks like a glance image reference
    (a glance:// URL or a bare image UUID)."""
    if not isinstance(image_href, six.string_types):
        return False
    if image_href.startswith('glance://'):
        return True
    return uuidutils.is_uuid_like(image_href)
| naototty/vagrant-lxc-ironic | ironic/common/glance_service/service_utils.py | Python | apache-2.0 | 8,408 |
from django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag
def get_language_byindex(index):
    """Return the (code, name) pair at *index* in settings.LANGUAGES.

    Falls back to an empty pair when the index is out of range.
    """
    try:
        return settings.LANGUAGES[index]
    except (KeyError, IndexError):
        return ('', '')
| kimus/django-blocks | blocks/templatetags/blocks_admin.py | Python | mit | 275 |
import itertools
import math
import unittest
from collections import Counter, OrderedDict
import pytest
from boofuzz import *
@pytest.fixture(autouse=True)
def clear_requests():
    # Reset boofuzz's global request registry after every test so that
    # request definitions cannot leak between test cases.
    yield
    blocks.REQUESTS = {}
    blocks.CURRENT = None
class TestString(unittest.TestCase):
    def _given_string(self):
        # Build a String primitive; remember its default value and an
        # equal-length all-NUL string used as the mutation baseline.
        self.default_value = "ABCDEFGH"
        self.default_default_value = "\x00" * len(self.default_value)
        return String(name="boofuzz-unit-test-name", default_value=self.default_value)
    def _given_string_max_len(self, max_len):
        # Same as _given_string, but capping every mutation at max_len chars.
        self.default_value = "ABCDEFGH"
        self.default_default_value = "\x00" * len(self.default_value)
        return String(name="boofuzz-unit-test-name", default_value=self.default_value, max_len=max_len)
    def _given_string_size(self, size, padding, encoding):
        # Build a fixed-size String primitive with explicit padding byte and
        # output encoding.
        self.default_value = "ABCDEFGH"
        self.default_default_value = "\x00" * len(self.default_value)
        return String(
            name="boofuzz-unit-test-name",
            default_value=self.default_value,
            size=size,
            padding=padding,
            encoding=encoding,
        )
    def test_mutations(self):
        # Walk the complete mutation sequence and check it yields, in order:
        # (1) the static fuzz library, (2) default-value repetitions,
        # (3) seeded long strings at the standard and extra-long lengths,
        # (4) "D"-strings with a NUL poked at random indices; finally verify
        # the total count matches num_mutations() and nothing is duplicated.
        uut = self._given_string()
        generator = uut.mutations(default_value=self.default_default_value)
        n = 0
        for expected, actual in zip(String._fuzz_library, generator):
            n += 1
            self.assertEqual(expected, actual)
        for expected, actual in zip(String._variable_mutation_multipliers, generator):
            n += 1
            self.assertEqual(self.default_default_value * expected, actual)
        for sequence in String.long_string_seeds:
            for size in [
                length + delta
                for length, delta in itertools.product(String._long_string_lengths, String._long_string_deltas)
            ]:
                n += 1
                expected = sequence * math.ceil(size / len(sequence))
                self.assertEqual(expected[:size], next(generator))
            for size in String._extra_long_string_lengths:
                n += 1
                expected = sequence * math.ceil(size / len(sequence))
                self.assertEqual(expected[:size], next(generator))
        for size in String._long_string_lengths:
            s = "D" * size
            for loc in uut.random_indices[size]:
                n += 1
                expected = s[:loc] + "\x00" + s[loc + 1 :]
                self.assertEqual(expected, next(generator))
        self.assertRaises(StopIteration, lambda: next(generator))
        self.assertEqual(n, uut.num_mutations(default_value=self.default_value))
        list_of_duplicates = [
            item for item, count in Counter(uut.mutations(default_value=self.default_value)).items() if count > 1
        ]
        self.assertEqual(0, len(list_of_duplicates))
    def test_mutations_max_len(self):
        # Same walk as test_mutations, but for several max_len caps: every
        # yielded mutation must equal the truncated form, oversize entries
        # are skipped (plus one mutation at exactly max_len when that length
        # is not already covered), and the sequence stays deduplicated.
        lengths = [5, 10, 128, 1000, 100000]
        for max_len in lengths:
            uut = self._given_string_max_len(max_len=max_len)
            generator = uut.mutations(default_value=self.default_default_value)
            def truncate(b):
                return b[:max_len]
            n = 0
            for expected, actual in zip(OrderedDict.fromkeys(list(map(truncate, String._fuzz_library))), generator):
                n += 1
                self.assertEqual(expected, actual)
            for expected, actual in zip(String._variable_mutation_multipliers, generator):
                n += 1
                self.assertEqual(truncate(self.default_default_value * expected), actual)
                # Once the cap is reached, longer repetitions all truncate to
                # the same value, so the sequence stops here.
                if max_len <= len(self.default_default_value * expected):
                    break
            for sequence in String.long_string_seeds:
                for size in [
                    length + delta
                    for length, delta in itertools.product(String._long_string_lengths, String._long_string_deltas)
                ]:
                    if size <= max_len:
                        n += 1
                        expected = sequence * math.ceil(size / len(sequence))
                        self.assertEqual(truncate(expected[:size]), next(generator))
                for size in String._extra_long_string_lengths:
                    if size <= max_len:
                        n += 1
                        expected = sequence * math.ceil(size / len(sequence))
                        self.assertEqual(truncate(expected[:size]), next(generator))
                # When max_len is not one of the standard sizes, one extra
                # mutation at exactly max_len is expected per seed.
                if max_len not in String._extra_long_string_lengths + [
                    length + delta
                    for length, delta in itertools.product(String._long_string_lengths, String._long_string_deltas)
                ]:
                    n += 1
                    expected = sequence * math.ceil(max_len / len(sequence))
                    self.assertEqual(truncate(expected), next(generator))
            for size in String._long_string_lengths:
                if size <= max_len:
                    s = "D" * size
                    for loc in uut.random_indices[size]:
                        expected = s[:loc] + "\x00" + s[loc + 1 :]
                        n += 1
                        self.assertEqual(truncate(expected), next(generator))
            self.assertRaises(StopIteration, lambda: next(generator))
            self.assertEqual(n, uut.num_mutations(default_value=self.default_value))
            list_of_duplicates = [
                item for item, count in Counter(uut.mutations(default_value=self.default_value)).items() if count > 1
            ]
            self.assertEqual(0, len(list_of_duplicates))
    def test_mutations_size(self):
        """Same walk again, but with a fixed encoded size: every mutation,
        after uut.encode(), must be truncated to `size` and right-padded with
        the pad byte; the count must still match num_mutations()."""
        lengths = [5, 10, 128, 1000, 100000]
        pad = b"\x41"
        encoding = "utf-8"
        # NOTE: fit_to_size closes over `max_len`, which is only assigned by
        # the for-loop below. Late binding makes this work because the
        # function is never called before the loop starts.
        def fit_to_size(b):
            b = b[:max_len].encode(encoding=encoding)
            pad_len = max(0, max_len - len(b))
            return b + pad * pad_len
        for max_len in lengths:
            uut = self._given_string_size(size=max_len, padding=pad, encoding=encoding)
            generator = uut.mutations(default_value=self.default_default_value)
            n = 0
            # Fuzz library, with truncation duplicates removed (order kept).
            for expected, actual in zip(OrderedDict.fromkeys(list(map(fit_to_size, String._fuzz_library))), generator):
                n += 1
                self.assertEqual(expected, uut.encode(actual))
            # Repeated-default mutations; stop once truncation makes the rest
            # identical.
            for expected, actual in zip(String._variable_mutation_multipliers, generator):
                n += 1
                self.assertEqual(fit_to_size(self.default_default_value * expected), uut.encode(actual))
                if max_len <= len(self.default_default_value * expected):
                    break
            for sequence in String.long_string_seeds:
                # Long-string sizes are only generated while they fit.
                for size in [
                    length + delta
                    for length, delta in itertools.product(String._long_string_lengths, String._long_string_deltas)
                ]:
                    if size <= max_len:
                        n += 1
                        expected = sequence * math.ceil(size / len(sequence))
                        self.assertEqual(fit_to_size(expected[:size]), uut.encode(next(generator)))
                for size in String._extra_long_string_lengths:
                    if size <= max_len:
                        n += 1
                        expected = sequence * math.ceil(size / len(sequence))
                        self.assertEqual(fit_to_size(expected[:size]), uut.encode(next(generator)))
                # One extra mutation exactly at max_len per seed, when max_len
                # is not already one of the generated sizes.
                if max_len not in String._extra_long_string_lengths + [
                    length + delta
                    for length, delta in itertools.product(String._long_string_lengths, String._long_string_deltas)
                ]:
                    n += 1
                    expected = sequence * math.ceil(max_len / len(sequence))
                    self.assertEqual(fit_to_size(expected), uut.encode(next(generator)))
            # Random NUL-poke mutations for sizes that fit.
            for length in String._long_string_lengths:
                if length <= max_len:
                    s = "D" * length
                    for loc in uut.random_indices[length]:
                        expected = s[:loc] + "\x00" + s[loc + 1 :]
                        n += 1
                        self.assertEqual(fit_to_size(expected), uut.encode(next(generator)))
            self.assertRaises(StopIteration, lambda: next(generator))
            self.assertEqual(n, uut.num_mutations(default_value=self.default_value))
            list_of_duplicates = [
                item for item, count in Counter(uut.mutations(default_value=self.default_value)).items() if count > 1
            ]
            self.assertEqual(0, len(list_of_duplicates))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| jtpereyda/boofuzz | unit_tests/test_string.py | Python | gpl-2.0 | 8,751 |
__version__ = '5.6.9'
| repotvsupertuga/tvsupertuga.repository | script.premium.TVsupertuga/resources/lib/external/jsbeautifier/__version__.py | Python | gpl-2.0 | 22 |
# Kirillov Alexey, IU7-22  (header translated from Russian)
# Assignment: "protection" (text databases)
#
# Reads ABC.txt character by character, splitting it into '.'-terminated
# sentences, and tracks runs of consecutive non-vowel characters.  A run is
# closed (and compared against the best so far) only when a vowel is met.
# At the end it prints the sentence in which the longest run was completed,
# together with that run's length.
#
# NOTE(review): several quirks look unintended -- confirm before relying on
# this behaviour:
#   * `tek` is not reset at sentence boundaries, so a run can span sentences;
#   * a trailing non-vowel run after the last vowel is never compared;
#   * `prev` is written but never read;
#   * `long` shadows the (Python 2) builtin name; the file is never closed.
base = open('ABC.txt','r')
s = base.read(1)
long = ''  # sentence in which the current maximum was completed
predl = ''  # accumulator for the current sentence ("predlozhenie")
n = -1  # best (longest) non-vowel run length seen so far
tek = 0  # current non-vowel run length ("tekushchiy" = current)
prev = True  # unused after assignment -- see review note above
thispred = False  # did the current sentence improve the maximum?
glasn = ['a','A','e','E','y','Y','u','U','i','I','o','O']  # vowels ("glasnye")
while s != '':
    if s == '\n':
        # Newlines are skipped entirely (not added to the sentence).
        s = base.read(1)
        continue
    if s == '.':
        # End of sentence: scan it, updating the best run length.
        for i in predl:
            if i in glasn:
                prev = False
                if tek >= n:
                    n = tek
                    thispred = True
                tek = 0
            else:
                prev = True
                tek += 1
        if thispred:
            thispred = False
            long = predl
        predl = ''
    else:
        predl += s
    s = base.read(1)
print(long,n)
| aspadm/labworks | module1-2/protect_base.py | Python | mit | 854 |
"""
Test that old keys deserialize just by importing opaque keys
"""
from unittest import TestCase
from opaque_keys.edx.keys import CourseKey, LearningContextKey, UsageKey
class TestDefault(TestCase):
    """
    Check that clients which merely import CourseKey can deserialize the expected keys, etc
    """

    def test_course_key(self):
        """Deprecated and new-style serialized course keys both parse via CourseKey."""
        for serialized in ('org.id/course_id/run', 'course-v1:org.id+course_id+run'):
            parsed = CourseKey.from_string(serialized)
            self.assertEqual(parsed.org, 'org.id')

    def test_learning_context_key(self):
        """Course key strings parse via LearningContextKey and yield CourseKey instances."""
        for serialized in ('org.id/course_id/run', 'course-v1:org.id+course_id+run'):
            parsed = LearningContextKey.from_string(serialized)
            self.assertEqual(parsed.org, 'org.id')
            self.assertIsInstance(parsed, CourseKey)

    def test_usage_key(self):
        """Old i4x-style and new block-v1 usage keys both parse via UsageKey."""
        for serialized in (
            'i4x://org.id/course_id/category/block_id',
            'block-v1:org.id+course_id+run+type@category+block@block_id',
        ):
            parsed = UsageKey.from_string(serialized)
            self.assertEqual(parsed.block_id, 'block_id')
| edx/opaque-keys | opaque_keys/edx/tests/test_default_deprecated.py | Python | agpl-3.0 | 1,382 |
from __future__ import absolute_import, unicode_literals
import logging
from cinnabar.cmd.util import CLI
from cinnabar.git import (
Git,
GitProcess,
InvalidConfig,
)
from cinnabar.githg import GitHgStore
from cinnabar.helper import GitHgHelper
from cinnabar.hg.bundle import (
create_bundle,
PushStore,
)
from cinnabar.hg.repo import (
BundleApplier,
get_bundle,
get_clonebundle,
get_repo,
Remote,
unbundle20,
unbundler,
)
from cinnabar.util import fsencode
# CLI entry point: `git cinnabar bundle` -- export git revisions as a
# mercurial bundle file.
@CLI.subcommand
@CLI.argument('--version', choices=(1, 2), type=int,
              default=2 if unbundle20 else 1,
              help='bundle version')
@CLI.argument('path', help='path of the bundle')
@CLI.argument('rev', nargs='+',
              help='git revision range (see the Specifying Ranges'
              ' section of gitrevisions(7))')
def bundle(args):
    '''create a mercurial bundle'''
    # Resolve the requested revision ranges to (commit, parents) pairs,
    # oldest first (--reverse on a --topo-order listing).
    revs = [fsencode(r) for r in args.rev]
    bundle_commits = list((c, p) for c, t, p in GitHgHelper.rev_list(
        b'--topo-order', b'--full-history', b'--parents', b'--reverse', *revs))
    if bundle_commits:
        # TODO: better UX. For instance, this will fail with an exception when
        # the parent commit doesn't have mercurial metadata.
        # Map the raw cinnabar.graft config values to a boolean.
        GRAFT = {
            None: False,
            b'false': False,
            b'true': True,
        }
        try:
            graft = Git.config('cinnabar.graft', values=GRAFT)
        except InvalidConfig as e:
            logging.error(str(e))
            return 1
        store = PushStore(graft=graft)
        # Version 1 bundles have no bundle2 capabilities; version 2 advertises
        # HG20 with changegroup 01/02 support.
        if args.version == 1:
            b2caps = {}
        elif args.version == 2:
            b2caps = {
                b'HG20': (),
                b'changegroup': (b'01', b'02'),
            }
        with open(args.path, 'wb') as fh:
            # Legacy (v1) bundles need the explicit uncompressed HG10 header;
            # bundle2 data carries its own header.
            if not b2caps:
                fh.write(b'HG10UN')
            for data in create_bundle(store, bundle_commits, b2caps):
                fh.write(data)
        # Roll back: creating a bundle must not persist metadata changes.
        store.close(rollback=True)
# CLI entry point: `git cinnabar unbundle` -- apply a mercurial bundle
# (from a file or http(s) URL, optionally a repo's advertised clone bundle)
# to the current git repository.
@CLI.subcommand
@CLI.argument('--clonebundle', action='store_true',
              help='get clone bundle from given repository')
@CLI.argument('url', help='url of the bundle')
def unbundle(args):
    '''apply a mercurial bundle to the repository'''
    # Make git emit its error when the current directory is not in a git repo.
    proc = GitProcess('rev-parse')
    ret = proc.wait()
    if ret:
        return ret
    remote = Remote(b'', fsencode(args.url))
    if remote.parsed_url.scheme not in (b'file', b'http', b'https'):
        logging.error('%s urls are not supported.' % remote.parsed_url.scheme)
        return 1
    if args.clonebundle:
        # The URL names a repository; ask it for its advertised clone bundle.
        repo = get_repo(remote)
        if not repo.capable(b'clonebundles'):
            logging.error('Repository does not support clonebundles')
            return 1
        bundle = get_clonebundle(repo)
    else:
        # The URL names the bundle itself.
        bundle = get_bundle(remote.url)
    store = GitHgStore()
    # Map the raw cinnabar.graft config values to a boolean.
    GRAFT = {
        None: False,
        b'false': False,
        b'true': True,
    }
    try:
        graft = Git.config('cinnabar.graft', values=GRAFT)
    except InvalidConfig as e:
        logging.error(str(e))
        return 1
    if graft:
        store.prepare_graft()
    # Wrap the raw stream in a bundle parser and hand it to the applier;
    # drop our own reference before applying.
    bundle = unbundler(bundle)
    apply_bundle = BundleApplier(bundle)
    del bundle
    apply_bundle(store)
    store.close()
| glandium/git-cinnabar | cinnabar/cmd/bundle.py | Python | gpl-2.0 | 3,371 |
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy import Column
from sqlalchemy import Table
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import mapper
from sqlalchemy.orm import sessionmaker
from spyne.model import XmlAttribute, File
from spyne.model import XmlData
from spyne.model import ComplexModel
from spyne.model import Array
from spyne.model import Integer32
from spyne.model import Unicode
from spyne.model import Integer
from spyne.model import Enum
from spyne.model import TTableModel
from spyne.model.binary import HybridFileStore
from spyne.model.complex import xml
from spyne.model.complex import table
TableModel = TTableModel()
class TestSqlAlchemySchema(unittest.TestCase):
    """Exercise spyne's TableModel <-> SQLAlchemy integration against an
    in-memory SQLite database: schema generation, relationships and storage
    modes (table/xml/json), inheritance (plain and polymorphic) and dynamic
    field addition via append_field."""
    # Fresh engine, session and MetaData per test; the MetaData is also
    # installed on TableModel so class declarations register their tables.
    def setUp(self):
        logging.getLogger('sqlalchemy').setLevel(logging.DEBUG)
        self.engine = create_engine('sqlite:///:memory:')
        self.session = sessionmaker(bind=self.engine)()
        self.metadata = TableModel.Attributes.sqla_metadata = MetaData()
        self.metadata.bind = self.engine
    # primary_key/autoincrement/unique/index attributes must land on the
    # generated Table.
    def test_schema(self):
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True, autoincrement=False)
            s = Unicode(64, unique=True)
            i = Integer32(64, index=True)
        t = SomeClass.__table__
        self.metadata.create_all() # not needed, just nice to see.
        assert t.c.id.primary_key == True
        assert t.c.id.autoincrement == False
        indexes = list(t.indexes)
        indexes.sort(key=lambda idx: idx.columns)
        for idx in indexes:
            assert 'i' in idx.columns or 's' in idx.columns
            if 's' in idx.columns:
                assert idx.unique
    # Single nested object stored as a foreign-key relation; setting it to
    # None must clear both the relation and the FK column.
    def test_nested_sql(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = (
                {"sqlite_autoincrement": True},
            )
            id = Integer32(primary_key=True)
            o = SomeOtherClass.customize(store_as='table')
        self.metadata.create_all()
        soc = SomeOtherClass(s='ehe')
        sc = SomeClass(o=soc)
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        sc_db = self.session.query(SomeClass).get(1)
        print(sc_db)
        assert sc_db.o.s == 'ehe'
        assert sc_db.o_id == 1
        sc_db.o = None
        self.session.commit()
        self.session.close()
        sc_db = self.session.query(SomeClass).get(1)
        assert sc_db.o == None
        assert sc_db.o_id == None
    # Array of child objects stored via a one-to-many relation.
    def test_nested_sql_array_as_table(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            others = Array(SomeOtherClass, store_as='table')
        self.metadata.create_all()
        soc1 = SomeOtherClass(s='ehe1')
        soc2 = SomeOtherClass(s='ehe2')
        sc = SomeClass(others=[soc1, soc2])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        sc_db = self.session.query(SomeClass).get(1)
        assert sc_db.others[0].s == 'ehe1'
        assert sc_db.others[1].s == 'ehe2'
        self.session.close()
    # Same as above, but through a many-to-many association table.
    def test_nested_sql_array_as_multi_table(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            others = Array(SomeOtherClass, store_as=table(multi=True))
        self.metadata.create_all()
        soc1 = SomeOtherClass(s='ehe1')
        soc2 = SomeOtherClass(s='ehe2')
        sc = SomeClass(others=[soc1, soc2])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        sc_db = self.session.query(SomeClass).get(1)
        assert sc_db.others[0].s == 'ehe1'
        assert sc_db.others[1].s == 'ehe2'
        self.session.close()
    # Many-to-many with a backref: children can navigate back to the parent.
    def test_nested_sql_array_as_multi_table_with_backref(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            others = Array(SomeOtherClass, store_as=table(multi=True, backref='some_classes'))
        self.metadata.create_all()
        soc1 = SomeOtherClass(s='ehe1')
        soc2 = SomeOtherClass(s='ehe2')
        sc = SomeClass(others=[soc1, soc2])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        soc_db = self.session.query(SomeOtherClass).all()
        assert soc_db[0].some_classes[0].id == 1
        assert soc_db[1].some_classes[0].id == 1
        self.session.close()
    # Array of (non-table) ComplexModel children serialized into an XML column.
    def test_nested_sql_array_as_xml(self):
        class SomeOtherClass(ComplexModel):
            id = Integer32
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            others = Array(SomeOtherClass, store_as='xml')
        self.metadata.create_all()
        soc1 = SomeOtherClass(s='ehe1')
        soc2 = SomeOtherClass(s='ehe2')
        sc = SomeClass(others=[soc1, soc2])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        sc_db = self.session.query(SomeClass).get(1)
        assert sc_db.others[0].s == 'ehe1'
        assert sc_db.others[1].s == 'ehe2'
        self.session.close()
    # xml(no_ns=True) must produce XML without namespace declarations;
    # verified by reading the raw column value back.
    def test_nested_sql_array_as_xml_no_ns(self):
        class SomeOtherClass(ComplexModel):
            id = Integer32
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            others = Array(SomeOtherClass, store_as=xml(no_ns=True))
        self.metadata.create_all()
        soc1 = SomeOtherClass(s='ehe1')
        soc2 = SomeOtherClass(s='ehe2')
        sc = SomeClass(others=[soc1, soc2])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        sc_xml = self.session.connection().execute("select others from some_class") \
            .fetchall()[0][0]
        from lxml import etree
        assert etree.fromstring(sc_xml).tag == 'SomeOtherClassArray'
        self.session.close()
    # Single-table inheritance: subclass-only fields must not be visible when
    # querying through the parent class.
    def test_inheritance(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            s = Unicode(64)
        class SomeClass(SomeOtherClass):
            numbers = Array(Integer32).store_as(xml(no_ns=True, root_tag='a'))
        self.metadata.create_all()
        sc = SomeClass(id=5, s='s', numbers=[1,2,3,4])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        sc_db = self.session.query(SomeClass).get(5)
        assert sc_db.numbers == [1, 2, 3, 4]
        self.session.close()
        sc_db = self.session.query(SomeOtherClass).get(5)
        assert sc_db.id == 5
        try:
            sc_db.numbers
        except AttributeError:
            pass
        else:
            raise Exception("must fail")
        self.session.close()
    def test_sqlalchemy_inheritance(self):
        # no spyne code is involved here.
        # this is just to test the sqlalchemy polymorphic behavior we rely on.
        class Employee(object):
            def __init__(self, name):
                self.name = name
            def __repr__(self):
                return self.__class__.__name__ + " " + self.name
        class Manager(Employee):
            def __init__(self, name, manager_data):
                self.name = name
                self.manager_data = manager_data
            def __repr__(self):
                return (
                    self.__class__.__name__ + " " +
                    self.name + " " + self.manager_data
                )
        class Engineer(Employee):
            def __init__(self, name, engineer_info):
                self.name = name
                self.engineer_info = engineer_info
            def __repr__(self):
                return (
                    self.__class__.__name__ + " " +
                    self.name + " " + self.engineer_info
                )
        employees_table = Table('employees', self.metadata,
            Column('employee_id', sqlalchemy.Integer, primary_key=True),
            Column('name', sqlalchemy.String(50)),
            Column('manager_data', sqlalchemy.String(50)),
            Column('engineer_info', sqlalchemy.String(50)),
            Column('type', sqlalchemy.String(20), nullable=False)
        )
        # Classical single-table polymorphic mapping discriminated on 'type'.
        employee_mapper = mapper(Employee, employees_table,
            polymorphic_on=employees_table.c.type, polymorphic_identity='employee')
        manager_mapper = mapper(Manager, inherits=employee_mapper,
            polymorphic_identity='manager')
        engineer_mapper = mapper(Engineer, inherits=employee_mapper,
            polymorphic_identity='engineer')
        self.metadata.create_all()
        manager = Manager('name', 'data')
        self.session.add(manager)
        self.session.commit()
        self.session.close()
        assert self.session.query(Employee).with_polymorphic('*').filter_by(employee_id=1).one().type == 'manager'
    # Polymorphic spyne models: NOT NULL must apply to base-class columns but
    # not to subclass-only columns (other identities share the table).
    def test_inheritance_polymorphic_with_non_nullables_in_subclasses(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True} # this is sqlite-specific
            __mapper_args__ = (
                (),
                {'polymorphic_on': 't', 'polymorphic_identity': 1},
            )
            id = Integer32(primary_key=True)
            t = Integer32(nillable=False)
            s = Unicode(64, nillable=False)
        class SomeClass(SomeOtherClass):
            __mapper_args__ = (
                (),
                {'polymorphic_identity': 2},
            )
            i = Integer(nillable=False)
        self.metadata.create_all()
        assert SomeOtherClass.__table__.c.s.nullable == False
        # this should be nullable to let other classes be added.
        # spyne still checks this constraint when doing input validation.
        # spyne should generate a constraint to check this at database level as
        # well.
        assert SomeOtherClass.__table__.c.i.nullable == True
        soc = SomeOtherClass(s='s')
        self.session.add(soc)
        self.session.commit()
        soc_id = soc.id
        # Omitting the non-nullable base field 's' must be rejected by the db.
        try:
            sc = SomeClass(i=5)
            self.session.add(sc)
            self.session.commit()
        except IntegrityError:
            self.session.rollback()
        else:
            raise Exception("Must fail with IntegrityError.")
        sc2 = SomeClass(s='s') # this won't fail. should it?
        self.session.add(sc2)
        self.session.commit()
        self.session.expunge_all()
        # Make sure the base instance kept its discriminator value.
        assert self.session.query(SomeOtherClass).with_polymorphic('*').filter_by(id=soc_id).one().t == 1
        self.session.close()
    # Subclass rows must be stored with the subclass's polymorphic identity.
    def test_inheritance_polymorphic(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True} # this is sqlite-specific
            __mapper_args__ = {'polymorphic_on': 't', 'polymorphic_identity': 1}
            id = Integer32(primary_key=True)
            s = Unicode(64)
            t = Integer32(nillable=False)
        class SomeClass(SomeOtherClass):
            __mapper_args__ = {'polymorphic_identity': 2}
            numbers = Array(Integer32).store_as(xml(no_ns=True, root_tag='a'))
        self.metadata.create_all()
        sc = SomeClass(id=5, s='s', numbers=[1,2,3,4])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        assert self.session.query(SomeOtherClass).with_polymorphic('*').filter_by(id=5).one().t == 2
        self.session.close()
    # Array of ComplexModel children serialized into a JSON column.
    def test_nested_sql_array_as_json(self):
        class SomeOtherClass(ComplexModel):
            id = Integer32
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            others = Array(SomeOtherClass, store_as='json')
        self.metadata.create_all()
        soc1 = SomeOtherClass(s='ehe1')
        soc2 = SomeOtherClass(s='ehe2')
        sc = SomeClass(others=[soc1, soc2])
        self.session.add(sc)
        self.session.commit()
        self.session.close()
        sc_db = self.session.query(SomeClass).get(1)
        assert sc_db.others[0].s == 'ehe1'
        assert sc_db.others[1].s == 'ehe2'
        self.session.close()
    # XmlAttribute/XmlData wrappers must be unwrapped to plain columns.
    def test_modifiers(self):
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            i = XmlAttribute(Integer32(pk=True))
            s = XmlData(Unicode(64))
        self.metadata.create_all()
        self.session.add(SomeClass(s='s'))
        self.session.commit()
        self.session.expunge_all()
        ret = self.session.query(SomeClass).get(1)
        assert ret.i == 1 # redundant
        assert ret.s == 's'
    # Field defaults declared on the model must be applied by the default ctor.
    def test_default_ctor(self):
        class SomeOtherClass(ComplexModel):
            id = Integer32
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            others = Array(SomeOtherClass, store_as='json')
            f = Unicode(32, default='uuu')
        self.metadata.create_all()
        self.session.add(SomeClass())
        self.session.commit()
        self.session.expunge_all()
        assert self.session.query(SomeClass).get(1).f == 'uuu'
    # The default ctor must also work when a relation field is present.
    def test_default_ctor_with_sql_relationship(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = (
                {"sqlite_autoincrement": True},
            )
            id = Integer32(primary_key=True)
            o = SomeOtherClass.customize(store_as='table')
        self.metadata.create_all()
        self.session.add(SomeClass())
        self.session.commit()
    # index= on a relation must create an index on the generated FK column.
    def test_store_as_index(self):
        class SomeOtherClass(TableModel):
            __tablename__ = 'some_other_class'
            __table_args__ = {"sqlite_autoincrement": True}
            id = Integer32(primary_key=True)
            s = Unicode(64)
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            __table_args__ = (
                {"sqlite_autoincrement": True},
            )
            id = Integer32(primary_key=True)
            o = SomeOtherClass.customize(store_as='table', index='btree')
        self.metadata.create_all()
        idx, = SomeClass.__table__.indexes
        assert 'o_id' in idx.columns
    # Arrays of primitives stored relationally: append, reassign and re-read.
    def test_scalar_collection(self):
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            id = Integer32(primary_key=True)
            values = Array(Unicode).store_as('table')
        self.metadata.create_all()
        self.session.add(SomeClass(id=1, values=['a', 'b', 'c']))
        self.session.commit()
        sc = self.session.query(SomeClass).get(1)
        assert sc.values == ['a', 'b', 'c']
        del sc
        sc = self.session.query(SomeClass).get(1)
        sc.values.append('d')
        self.session.commit()
        del sc
        sc = self.session.query(SomeClass).get(1)
        assert sc.values == ['a', 'b', 'c', 'd']
        sc = self.session.query(SomeClass).get(1)
        sc.values = sc.values[1:]
        self.session.commit()
        del sc
        sc = self.session.query(SomeClass).get(1)
        assert sc.values == ['b', 'c', 'd']
    # Two relations to the same child class must generate distinct FKs.
    def test_multiple_fk(self):
        class SomeChildClass(TableModel):
            __tablename__ = 'some_child_class'
            id = Integer32(primary_key=True)
            s = Unicode(64)
            i = Integer32
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            id = Integer32(primary_key=True)
            children = Array(SomeChildClass).store_as('table')
            mirror = SomeChildClass.store_as('table')
        self.metadata.create_all()
        children = [
            SomeChildClass(s='p', i=600),
            SomeChildClass(s='|', i=10),
            SomeChildClass(s='q', i=9),
        ]
        sc = SomeClass(children=children)
        self.session.add(sc)
        self.session.flush()
        sc.mirror = children[1]
        self.session.commit()
        del sc
        sc = self.session.query(SomeClass).get(1)
        assert ''.join([scc.s for scc in sc.children]) == 'p|q'
        assert sum([scc.i for scc in sc.children]) == 619
    # Generated schemas must survive SQLAlchemy reflection into a new MetaData.
    def test_reflection(self):
        class SomeChildClass(TableModel):
            __tablename__ = 'some_child_class'
            id = Integer32(primary_key=True)
            s = Unicode(64)
            i = Integer32
        class SomeClass(TableModel):
            __tablename__ = 'some_class'
            id = Integer32(primary_key=True)
            children = Array(SomeChildClass).store_as('xml')
            mirror = SomeChildClass.store_as('json')
        metadata2 = MetaData()
        metadata2.bind = self.engine
        metadata2.reflect()
    # Disabled (leading underscore): documents intended behaviour of mapping
    # several models onto one __table__; partial models should only flush
    # their own columns.
    def _test_sqlalchemy_remapping(self):
        class SomeTable(TableModel):
            __tablename__ = 'some_table'
            id = Integer32(pk=True)
            i = Integer32
            s = Unicode(32)
        class SomeTableSubset(TableModel):
            __table__ = SomeTable.__table__
            id = Integer32(pk=True) # sqla session doesn't work without pk
            i = Integer32
        class SomeTableOtherSubset(TableModel):
            __table__ = SomeTable.__table__
            _type_info = [(k,v) for k, v in SomeTable._type_info.items() if k in ('id', 's')]
        self.session.add(SomeTable(id=1,i=2,s='s'))
        self.session.commit()
        st = self.session.query(SomeTableSubset).get(1)
        sts = self.session.query(SomeTableOtherSubset).get(1)
        stos = self.session.query(SomeTableSubset).get(1)
        sts.i = 3
        sts.s = 'ss' # will not be flushed to db
        self.session.commit()
        assert st.s == 's'
        assert stos.i == 3
    # File fields stored through a HybridFileStore must round-trip name, type
    # and content.
    def test_file_storage(self):
        class C(TableModel):
            __tablename__ = "c"
            id = Integer32(pk=True)
            f = File(store_as=HybridFileStore('store', 'json'))
        self.metadata.create_all()
        c = C(f=File.Value(name="name", type="type", data=["data"]))
        self.session.add(c)
        self.session.flush()
        self.session.commit()
        c = self.session.query(C).get(1)
        print(c)
        assert c.f.name == "name"
        assert c.f.type == "type"
        # NOTE(review): str() on the data chunk looks Python-2 specific
        # (bytes vs str) -- confirm on Python 3.
        assert str(c.f.data[0][:]) == "data"
    # append_field with a relation must reuse/define the FK and map it.
    def test_add_field_complex_existing_column(self):
        class C(TableModel):
            __tablename__ = "c"
            u = Unicode(pk=True)
        class D(TableModel):
            __tablename__ = "d"
            d = Integer32(pk=True)
            c = C.store_as('table')
        C.append_field('d', D.store_as('table'))
        assert C.Attributes.sqla_mapper.get_property('d').argument is D
    # Disabled: documents the undefined-behaviour case of declaring both a
    # relation and its FK column explicitly.
    def _test_add_field_complex_explicit_existing_column(self):
        class C(TableModel):
            __tablename__ = "c"
            id = Integer32(pk=True)
        # c already also produces c_id. this is undefined behaviour, one of them
        # gets ignored, whichever comes first.
        class D(TableModel):
            __tablename__ = "d"
            id = Integer32(pk=True)
            c = C.store_as('table')
            c_id = Integer32(15)
    # append_field can close a circular parent<->children relation using
    # matching left/right column names.
    def test_add_field_complex_circular_array(self):
        class C(TableModel):
            __tablename__ = "cc"
            id = Integer32(pk=True)
        class D(TableModel):
            __tablename__ = "dd"
            id = Integer32(pk=True)
            c = Array(C).customize(store_as=table(right='dd_id'))
        C.append_field('d', D.customize(store_as=table(left='dd_id')))
        self.metadata.create_all()
        c1, c2 = C(id=1), C(id=2)
        d = D(id=1, c=[c1,c2])
        self.session.add(d)
        self.session.commit()
        assert c1.d.id == 1
    # append_field with a relation must create a fresh FK column when needed.
    def test_add_field_complex_new_column(self):
        class C(TableModel):
            __tablename__ = "c"
            u = Unicode(pk=True)
        class D(TableModel):
            __tablename__ = "d"
            id = Integer32(pk=True)
        C.append_field('d', D.store_as('table'))
        assert C.Attributes.sqla_mapper.get_property('d').argument is D
        assert isinstance(C.Attributes.sqla_table.c['d_id'].type, sqlalchemy.Integer)
    # Appending an Array puts the FK on the child table.
    def test_add_field_array(self):
        class C(TableModel):
            __tablename__ = "c"
            id = Integer32(pk=True)
        class D(TableModel):
            __tablename__ = "d"
            id = Integer32(pk=True)
        C.append_field('d', Array(D).store_as('table'))
        assert C.Attributes.sqla_mapper.get_property('d').argument is D
        print(repr(D.Attributes.sqla_table))
        assert isinstance(D.Attributes.sqla_table.c['c_id'].type, sqlalchemy.Integer)
    # Appending an Array with multi= creates the named association table.
    def test_add_field_array_many(self):
        class C(TableModel):
            __tablename__ = "c"
            id = Integer32(pk=True)
        class D(TableModel):
            __tablename__ = "d"
            id = Integer32(pk=True)
        C.append_field('d', Array(D).store_as(table(multi='c_d')))
        assert C.Attributes.sqla_mapper.get_property('d').argument is D
        rel_table = C.Attributes.sqla_metadata.tables['c_d']
        assert 'c_id' in rel_table.c
        assert 'd_id' in rel_table.c
    # Customizations (nullable, explicit FK name) must reach the new column.
    def test_add_field_complex_cust(self):
        class C(TableModel):
            __tablename__ = "c"
            id = Integer32(pk=True)
        class D(TableModel):
            __tablename__ = "d"
            id = Integer32(pk=True)
            c = Array(C).store_as('table')
        C.append_field('d', D.customize(
            nullable=False,
            store_as=table(left='d_id'),
        ))
        assert C.__table__.c['d_id'].nullable == False
class TestSqlAlchemySchemaWithPostgresql(unittest.TestCase):
    """Schema-generation-only checks (no engine/session): declaring the model
    is enough to register its table on the shared MetaData."""
    def setUp(self):
        # Fresh MetaData per test, installed on the shared TableModel base.
        self.metadata = TableModel.Attributes.sqla_metadata = MetaData()
    # Enum fields must produce an 'e' column whose type carries the choices.
    def test_enum(self):
        table_name = "test_enum"
        enums = ('SUBSCRIBED', 'UNSUBSCRIBED', 'UNCONFIRMED')
        class SomeClass(TableModel):
            __tablename__ = table_name
            id = Integer32(primary_key=True)
            e = Enum(*enums, type_name='status_choices')
        # The class declaration above registers the table on self.metadata.
        t = self.metadata.tables[table_name]
        assert 'e' in t.c
        assert t.c.e.type.enums == enums
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| deevarvar/myLab | baidu_code/soap_mockserver/spyne/test/test_sqlalchemy.py | Python | mit | 25,512 |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_intfiles
# Purpose: From Spidering and from searching search engines, identifies
# files of potential interest.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 06/04/2014
# Copyright: (c) Steve Micallef 2014
# Licence: GPL
# -------------------------------------------------------------------------------
import re
import urllib
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_intfiles(SpiderFootPlugin):
    """Interesting Files:Footprint:Identifies potential files of interest, e.g. office documents, zip files."""
    # NOTE: the docstring above is machine-parsed by SpiderFoot for the module
    # listing (name:use case:description) -- keep its format intact.

    # Default options
    opts = {
        'pages': 20,  # Number of search results pages to iterate
        'fileexts': ["doc", "docx", "ppt", "pptx", "pdf", 'xls', 'xlsx', 'zip'],
        'usesearch': True,
        'searchengine': "yahoo"
    }

    # Option descriptions
    optdescs = {
        'pages': "Number of search engine results pages to iterate through if using one.",
        'fileexts': "File extensions of files you consider interesting.",
        'usesearch': "Use search engines to quickly find files. If false, only spidering will be used.",
        'searchengine': "If using a search engine, which one? google, yahoo or bing."
    }

    # Items (event payloads, result pages, links) already processed in this
    # scan; re-initialized per scan in setup().
    results = list()

    def setup(self, sfc, userOpts=dict()):
        # sfc is the SpiderFoot context; userOpts entries override self.opts.
        # (userOpts is only read here, so the mutable default is harmless.)
        self.sf = sfc
        self.results = list()

        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ["INTERNET_NAME", "LINKED_URL_INTERNAL"]

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return ["SEARCH_ENGINE_WEB_CONTENT", "INTERESTING_FILE"]

    def yahooCleaner(self, string):
        # re.sub() callback: turn a Yahoo redirect capture into a url="..."
        # attribute so parseLinks() can see the real target URL.
        return " url=\"" + urllib.unquote(string.group(1)) + "\" "

    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)

        if eventName == "INTERNET_NAME" and not self.opts['usesearch']:
            self.sf.debug("Not using a search engine to find interesting files.")
            return None

        # De-duplicate: process each event payload only once per scan.
        if eventData in self.results:
            return None
        else:
            self.results.append(eventData)

        if eventName == "LINKED_URL_INTERNAL":
            # Spidered URL: report it directly if it carries an interesting
            # extension.
            for fileExt in self.opts['fileexts']:
                if "." + fileExt.lower() in eventData.lower():
                    if eventData in self.results:
                        continue
                    else:
                        self.results.append(eventData)

                    evt = SpiderFootEvent("INTERESTING_FILE", eventData,
                                          self.__name__, event)
                    self.notifyListeners(evt)
            return None

        # Handling INTERNET_NAME event: query a search engine per extension.
        for fileExt in self.opts['fileexts']:
            # FIX: initialise 'pages' so a misconfigured 'searchengine' option
            # (anything other than google/bing/yahoo) no longer raises a
            # NameError below; it now falls through to the "no results" path.
            pages = None

            # Sites hosted on the domain
            if self.opts['searchengine'].lower() == "google":
                pages = self.sf.googleIterate("site:" + eventData + "+" +
                                              "%2Bext:" + fileExt, dict(limit=self.opts['pages'],
                                              useragent=self.opts['_useragent'],
                                              timeout=self.opts['_fetchtimeout']))
            if self.opts['searchengine'].lower() == "bing":
                pages = self.sf.bingIterate("site:" + eventData + "+" +
                                            "%2Bext:" + fileExt, dict(limit=self.opts['pages'],
                                            useragent=self.opts['_useragent'],
                                            timeout=self.opts['_fetchtimeout']))
            if self.opts['searchengine'].lower() == "yahoo":
                pages = self.sf.yahooIterate("site:" + eventData + "+" +
                                             "%2Bext:" + fileExt, dict(limit=self.opts['pages'],
                                             useragent=self.opts['_useragent'],
                                             timeout=self.opts['_fetchtimeout']))
            if pages is None:
                self.sf.info("No results returned from " + self.opts['searchengine'] +
                             " for " + fileExt + " files.")
                continue

            for page in pages.keys():
                if page in self.results:
                    continue
                else:
                    self.results.append(page)

                # Check if we've been asked to stop
                if self.checkForStop():
                    return None

                # Submit the results for analysis
                evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", pages[page],
                                      self.__name__, event)
                self.notifyListeners(evt)

                # Yahoo wraps result links in redirects; rewrite them so the
                # link parser sees the real URLs.
                if self.opts['searchengine'].lower() == "yahoo":
                    res = re.sub("RU=(.[^\/]+)\/RK=", self.yahooCleaner, pages[page], 0)
                else:
                    res = pages[page]

                links = self.sf.parseLinks(page, res, eventData)
                if len(links) == 0:
                    continue

                for link in links:
                    if link in self.results:
                        continue
                    else:
                        self.results.append(link)
                    # Only report links on the target domain with an
                    # interesting extension.
                    if self.sf.urlFQDN(link).endswith(eventData) and \
                            "." + fileExt.lower() in link.lower():
                        self.sf.info("Found an interesting file: " + link)
                        evt = SpiderFootEvent("INTERESTING_FILE", link,
                                              self.__name__, event)
                        self.notifyListeners(evt)
| Reality9/spiderfoot | modules/sfp_intfiles.py | Python | gpl-2.0 | 6,481 |
# -*- coding: utf-8 -*-
from typing import Text
from six.moves import urllib
from zerver.lib.test_classes import WebhookTestCase
class SolanoHookTests(WebhookTestCase):
    """Replay recorded Solano Labs CI webhook payloads and verify the
    rendered Zulip stream messages (topic + markdown body)."""
    STREAM_NAME = 'solano labs'
    URL_TEMPLATE = u"/api/v1/external/solano?api_key={api_key}"
    FIXTURE_DIR_NAME = 'solano'
    def test_solano_message_001(self):
        # type: () -> None
        """
        Build notifications are generated by Solano Labs after build completes.
        """
        expected_topic = u'build update'
        expected_message = (u"Author: solano-ci[bot]@users.noreply.github.com\n"
                            u"Commit: [5f438401eb7cc7268cbc28438bfa70bb99f48a03]"
                            u"(github.com/fazerlicourice7/solano/commit/5f438401eb7cc7268"
                            u"cbc28438bfa70bb99f48a03)\nBuild status: failed :thumbsdown:\n"
                            u"[Build Log](https://ci.solanolabs.com:443/reports/3316175)")
        self.send_and_test_stream_message('build_001', expected_topic, expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_solano_message_002(self):
        # type: () -> None
        """
        Build notifications are generated by Solano Labs after build completes.
        """
        expected_topic = u'build update'
        expected_message = (u"Author: Unknown\nCommit: [5d0b92e26448a9e91db794bfed4b8c3556eabc4e]"
                            u"(bitbucket.org/fazerlicourice7/test/commits/5d0b92e26448a9e91db794bfed"
                            u"4b8c3556eabc4e)\nBuild status: failed :thumbsdown:\n"
                            u"[Build Log](https://ci.solanolabs.com:443/reports/3316723)")
        self.send_and_test_stream_message('build_002', expected_topic, expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_solano_message_received(self):
        # type: () -> None
        """
        Build notifications are generated by Solano Labs after build completes.
        """
        expected_topic = u'build update'
        expected_message = (u"Author: solano-ci[bot]@users.noreply.github.com\n"
                            u"Commit: [191d34f9da8ff7279b051cd68e44223253e18408]"
                            u"(github.com/anirudhjain75/scipy/commit/191d34f9da8ff7279b051cd"
                            u"68e44223253e18408)\nBuild status: running :arrows_counterclockwise:\n"
                            u"[Build Log](https://ci.solanolabs.com:443/reports/3317799)")
        self.send_and_test_stream_message('received', expected_topic, expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def get_body(self, fixture_name):
        # type: (Text) -> Text
        # Fixtures live as JSON files under the 'solano' fixture directory.
        return self.fixture_data(self.FIXTURE_DIR_NAME, fixture_name, file_type="json")
| sonali0901/zulip | zerver/webhooks/solano/tests.py | Python | apache-2.0 | 2,905 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import operator
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext as _
from six.moves import urllib, zip_longest, zip, range
from typing import Any, List, Dict, Optional, Text
import os
import ujson
def with_language(string, language):
    # type: (Text, Text) -> Text
    """Return *string* translated into *language*.

    Temporarily activates *language*, performs the translation, then
    restores the previously active language.
    """
    previous = translation.get_language()
    translation.activate(language)
    translated = _(string)
    translation.activate(previous)
    return translated
def get_language_list():
    # type: () -> List[Dict[str, Any]]
    """Load locale/language_options.json and return its languages, each
    with its display name translated into the language itself, sorted by
    that (translated) name."""
    options_path = os.path.join(settings.STATIC_ROOT, 'locale',
                                'language_options.json')
    with open(options_path, 'r') as fp:
        data = ujson.load(fp)
    result = []
    for info in data['languages']:
        info['name'] = with_language(info['name'], info['code'])
        result.append(info)
    result.sort(key=lambda info: info['name'])
    return result
def get_language_list_for_templates(default_language):
    # type: (Text) -> List[Dict[str, Dict[str, str]]]
    """Arrange selectable languages into two columns for the language
    picker template.

    Languages below 5% translated are dropped (except those with no
    percentage recorded). Each returned row holds up to two entries,
    keyed 'first' and 'second'.
    """
    languages = [lang for lang in get_language_list()
                 if 'percent_translated' not in lang or
                 lang['percent_translated'] >= 5.]
    total = len(languages)
    # First column gets the extra item when the count is odd.
    split_at = (total + 1) // 2
    first_col = list(range(split_at))
    second_col = list(range(split_at, total))
    assert len(first_col) + len(second_col) == total
    rows = []
    for pair in zip_longest(first_col, second_col):
        row = {}
        for position, index in zip(['first', 'second'], pair):
            if index is None:
                continue
            lang = languages[index]
            display = lang['name']
            percent = display
            if 'percent_translated' in lang:
                percent = u"{} ({}%)".format(display, lang['percent_translated'])
            row[position] = {
                'name': display,
                'code': lang['code'],
                'percent': percent,
                'selected': default_language == lang['code'],
            }
        rows.append(row)
    return rows
def get_language_name(code):
    # type: (str) -> Optional[Text]
    """Return the display name for language *code*, or None if unknown."""
    return next((lang['name'] for lang in get_language_list()
                 if lang['code'] == code), None)
def get_available_language_codes():
    # type: () -> List[Text]
    """Return the codes of every known language."""
    return [entry['code'] for entry in get_language_list()]
| sonali0901/zulip | zerver/lib/i18n.py | Python | apache-2.0 | 2,622 |
# $Id: fi.py 6460 2010-10-29 22:18:44Z milde $
# Author: Asko Soukka <asko.soukka@iki.fi>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Finnish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
    # language-dependent: fixed
    # All literals use the u'' prefix consistently: this module predates
    # Python 3 and mixing byte strings with unicode keys/values was an
    # inconsistency (ASCII-only, so lookups still worked, but types differed).
    u'huomio': u'attention',
    u'varo': u'caution',
    u'vaara': u'danger',
    u'virhe': u'error',
    u'vihje': u'hint',
    u't\u00e4rke\u00e4\u00e4': u'important',
    u'huomautus': u'note',
    u'neuvo': u'tip',
    u'varoitus': u'warning',
    u'kehotus': u'admonition',
    u'sivupalkki': u'sidebar',
    u'aihe': u'topic',
    u'rivi': u'line-block',
    u'tasalevyinen': u'parsed-literal',
    u'ohje': u'rubric',
    u'epigraafi': u'epigraph',
    u'kohokohdat': u'highlights',
    u'lainaus': u'pull-quote',
    u'taulukko': u'table',
    u'csv-taulukko': u'csv-table',
    u'list-table (translation required)': u'list-table',
    u'compound (translation required)': u'compound',
    u'container (translation required)': u'container',
    #u'kysymykset': u'questions',
    u'meta': u'meta',
    u'math (translation required)': u'math',
    #u'kuvakartta': u'imagemap',
    u'kuva': u'image',
    u'kaavio': u'figure',
    u'sis\u00e4llyt\u00e4': u'include',
    u'raaka': u'raw',
    u'korvaa': u'replace',
    u'unicode': u'unicode',
    u'p\u00e4iv\u00e4ys': u'date',
    u'luokka': u'class',
    u'rooli': u'role',
    u'default-role (translation required)': u'default-role',
    u'title (translation required)': u'title',
    u'sis\u00e4llys': u'contents',
    u'kappale': u'sectnum',
    u'header (translation required)': u'header',
    u'footer (translation required)': u'footer',
    #u'alaviitteet': u'footnotes',
    #u'viitaukset': u'citations',
    u'target-notes (translation required)': u'target-notes'}
"""Finnish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
    # language-dependent: fixed
    # u'' prefixes applied consistently (previously a few entries were
    # plain byte strings, differing in type from the rest under Python 2).
    u'lyhennys': u'abbreviation',
    u'akronyymi': u'acronym',
    u'kirjainsana': u'acronym',
    u'hakemisto': u'index',
    u'luettelo': u'index',
    u'alaindeksi': u'subscript',
    u'indeksi': u'subscript',
    u'yl\u00e4indeksi': u'superscript',
    u'title-reference (translation required)': u'title-reference',
    u'title (translation required)': u'title-reference',
    u'pep-reference (translation required)': u'pep-reference',
    u'rfc-reference (translation required)': u'rfc-reference',
    u'korostus': u'emphasis',
    u'vahvistus': u'strong',
    u'tasalevyinen': u'literal',
    u'math (translation required)': u'math',
    u'named-reference (translation required)': u'named-reference',
    u'anonymous-reference (translation required)': u'anonymous-reference',
    u'footnote-reference (translation required)': u'footnote-reference',
    u'citation-reference (translation required)': u'citation-reference',
    u'substitution-reference (translation required)': u'substitution-reference',
    u'kohde': u'target',
    u'uri-reference (translation required)': u'uri-reference',
    u'raw (translation required)': u'raw',}
"""Mapping of Finnish role names to canonical role names for interpreted text.
"""
| ajaxsys/dict-admin | docutils/parsers/rst/languages/fi.py | Python | bsd-3-clause | 3,642 |
#!/usr/bin/env python3
'''Test for zone semantic checks during zone commit.'''
import os
from dnstest.libknot import libknot
from dnstest.test import Test
from dnstest.utils import *
t = Test()
knot = t.server("knot")
ctl = libknot.control.KnotCtl()
ZONE_NAME = "testzone."
t.start()
ctl.connect(os.path.join(knot.dir, "knot.sock"))
# Add new zone.
ctl.send_block(cmd="conf-begin")
resp = ctl.receive_block()
ctl.send_block(cmd="conf-set", section="zone", item="domain", data=ZONE_NAME)
resp = ctl.receive_block()
ctl.send_block(cmd="conf-commit")
resp = ctl.receive_block()
# Try to create initial zone contents with a semantic error.
# (A CNAME at the apex next to SOA/A data must be rejected on commit.)
ctl.send_block(cmd="zone-begin", zone=ZONE_NAME)
resp = ctl.receive_block()
ctl.send_block(cmd="zone-set", zone=ZONE_NAME, owner="@", ttl="3600", rtype="SOA",
               data="a. b. 1 2 3 4 5")
resp = ctl.receive_block()
ctl.send_block(cmd="zone-set", zone=ZONE_NAME, owner="@", ttl="600", rtype="A",
               data="192.168.0.1")
resp = ctl.receive_block()
ctl.send_block(cmd="zone-set", zone=ZONE_NAME, owner="@", ttl="3600", rtype="CNAME",
               data="example.com.")
resp = ctl.receive_block()
# The commit must fail with a semantic-check error.
try:
    ctl.send_block(cmd="zone-commit", zone=ZONE_NAME)
    resp = ctl.receive_block()
except libknot.control.KnotCtlError as e:
    isset("semantic check" in e.message.lower(), "expected error")
else:
    set_err("SEMANTIC CHECK NOT APPLIED")
# Fix the semantic error and continue.
ctl.send_block(cmd="zone-unset", zone=ZONE_NAME, owner="@", rtype="CNAME")
resp = ctl.receive_block()
ctl.send_block(cmd="zone-commit", zone=ZONE_NAME)
resp = ctl.receive_block()
# Check the resulting zone contents.
ctl.send_block(cmd="zone-read", zone=ZONE_NAME)
resp = ctl.receive_block()
isset(ZONE_NAME in resp, "zone contents")
isset("SOA" in resp[ZONE_NAME][ZONE_NAME], "zone SOA presence")
isset("3600" in resp[ZONE_NAME][ZONE_NAME]["SOA"]["ttl"], "zone SOA ttl")
isset("a. b. 1 2 3 4 5" in resp[ZONE_NAME][ZONE_NAME]["SOA"]["data"], "zone SOA rdata")
isset("A" in resp[ZONE_NAME][ZONE_NAME], "zone A presence")
isset("600" in resp[ZONE_NAME][ZONE_NAME]["A"]["ttl"], "zone A ttl")
isset("192.168.0.1" in resp[ZONE_NAME][ZONE_NAME]["A"]["data"], "zone A rdata")
isset("CNAME" not in resp[ZONE_NAME][ZONE_NAME], "zone CNAME absence")
# Try to introduce a semantic error to existing zone contents.
ctl.send_block(cmd="zone-begin", zone=ZONE_NAME)
resp = ctl.receive_block()
ctl.send_block(cmd="zone-set", zone=ZONE_NAME, owner="@", ttl="3600", rtype="CNAME",
               data="example.com.")
resp = ctl.receive_block()
# Again, committing the apex CNAME must fail the semantic check.
try:
    ctl.send_block(cmd="zone-commit", zone=ZONE_NAME)
    resp = ctl.receive_block()
except libknot.control.KnotCtlError as e:
    isset("semantic check" in e.message.lower(), "expected error")
else:
    set_err("SEMANTIC CHECK NOT APPLIED")
# Fix the semantic error and continue.
ctl.send_block(cmd="zone-unset", zone=ZONE_NAME, owner="@", rtype="CNAME")
resp = ctl.receive_block()
ctl.send_block(cmd="zone-commit", zone=ZONE_NAME)
resp = ctl.receive_block()
# Check the resulting zone contents.
ctl.send_block(cmd="zone-read", zone=ZONE_NAME)
resp = ctl.receive_block()
isset(ZONE_NAME in resp, "zone contents")
isset("SOA" in resp[ZONE_NAME][ZONE_NAME], "zone SOA presence")
isset("A" in resp[ZONE_NAME][ZONE_NAME], "zone A presence")
isset("CNAME" not in resp[ZONE_NAME][ZONE_NAME], "zone CNAME absence")
# Cleanup.
ctl.send(libknot.control.KnotCtlType.END)
ctl.close()
t.end()
| CZ-NIC/knot | tests-extra/tests/ctl/zone_semchecks/test.py | Python | gpl-3.0 | 3,480 |
import gym
from gym.envs.classic_control import PendulumEnv, CartPoleEnv
import numpy as np
# MuJoCo may not be installed.
HalfCheetahEnv = HopperEnv = None
try:
from gym.envs.mujoco import HalfCheetahEnv, HopperEnv
except (ImportError, gym.error.DependencyNotInstalled):
pass
class CartPoleWrapper(CartPoleEnv):
    """Wrapper for the Cartpole-v0 environment.

    Adds an additional `reward` method (batched) for model-based RL
    algorithms such as MB-MPO.
    """

    def reward(self, obs, action, obs_next):
        # Each obs_next row is [pos, vel, angle, rotation_rate].
        cart_pos = obs_next[:, 0]
        pole_angle = obs_next[:, 2]
        # 1.0 exactly when the next state is out of bounds (cart position
        # or pole angle past its threshold), else 0.0 -- identical to the
        # original chained boolean-or comparison.
        out_of_bounds = (np.abs(cart_pos) > self.x_threshold) | (
            np.abs(pole_angle) > self.theta_threshold_radians)
        return out_of_bounds.astype(float)
class PendulumWrapper(PendulumEnv):
    """Wrapper for the Pendulum-v0 environment.

    Adds an additional `reward` method (batched) for model-based RL
    algorithms such as MB-MPO.
    """

    def reward(self, obs, action, obs_next):
        # obs columns: [cos(theta), sin(theta), dtheta/dt].
        sin_t = np.clip(obs[:, 1], -1.0, 1.0)
        cos_t = np.clip(obs[:, 0], -1.0, 1.0)
        theta = np.arctan2(sin_t, cos_t)
        # Single scalar torque, clipped to the actuator limit.
        torque = np.clip(action, -self.max_torque, self.max_torque)[0]
        angle_cost = self.angle_normalize(theta) ** 2
        velocity_cost = 0.1 * obs[:, 2] ** 2
        effort_cost = 0.001 * (torque ** 2)
        return -(angle_cost + velocity_cost + effort_cost)

    @staticmethod
    def angle_normalize(x):
        # Wrap an angle into [-pi, pi).
        return ((x + np.pi) % (2 * np.pi)) - np.pi
if HalfCheetahEnv:
    class HalfCheetahWrapper(HalfCheetahEnv):
        """Wrapper for the MuJoCo HalfCheetah-v2 environment.
        Adds an additional `reward` method for some model-based RL algos (e.g.
        MB-MPO).
        """
        def reward(self, obs, action, obs_next):
            # Supports both batched (2-D) and single (1-D) observations.
            if obs.ndim == 2 and action.ndim == 2:
                assert obs.shape == obs_next.shape
                # obs_next[:, 8] presumably holds forward velocity in the
                # observation vector -- TODO confirm against env layout.
                forward_vel = obs_next[:, 8]
                ctrl_cost = 0.1 * np.sum(np.square(action), axis=1)
                reward = forward_vel - ctrl_cost
                # Clamp to [-1000, 1000] to bound model-rollout rewards.
                return np.minimum(np.maximum(-1000.0, reward), 1000.0)
            else:
                forward_vel = obs_next[8]
                ctrl_cost = 0.1 * np.square(action).sum()
                reward = forward_vel - ctrl_cost
                return np.minimum(np.maximum(-1000.0, reward), 1000.0)
    class HopperWrapper(HopperEnv):
        """Wrapper for the MuJoCo Hopper-v2 environment.
        Adds an additional `reward` method for some model-based RL algos (e.g.
        MB-MPO).
        """
        def reward(self, obs, action, obs_next):
            alive_bonus = 1.0
            # Batched observations only.
            assert obs.ndim == 2 and action.ndim == 2
            assert (obs.shape == obs_next.shape
                    and action.shape[0] == obs.shape[0])
            # obs_next[:, 5] presumably holds forward velocity -- TODO confirm.
            vel = obs_next[:, 5]
            ctrl_cost = 1e-3 * np.sum(np.square(action), axis=1)
            reward = vel + alive_bonus - ctrl_cost
            # Clamp to [-1000, 1000] to bound model-rollout rewards.
            return np.minimum(np.maximum(-1000.0, reward), 1000.0)
if __name__ == "__main__":
    # Manual smoke test: run a short random-action rollout with rendering.
    env = PendulumWrapper()
    env.reset()
    for _ in range(100):
        env.step(env.action_space.sample())
        env.render()
| richardliaw/ray | rllib/examples/env/mbmpo_env.py | Python | apache-2.0 | 3,383 |
#! /usr/bin/python
#from check_dependencies import CheckDependencies
#def test_default():
# CheckDependencies(None)
#def test_hydrotrend():
# CheckDependencies("hydrotrend")
#def test_cem():
# CheckDependencies("cem")
#def test_child():
# CheckDependencies("child")
#def test_sedflux():
#    CheckDependencies("sedflux")
| csdms/packagebuilder | packager/core/test/test_check_dependencies.py | Python | mit | 337 |
"""
sentry.models.project
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import warnings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import F
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.cache import Lock
from sentry.utils.http import absolute_uri
# TODO(dcramer): pull in enum library
class ProjectStatus(object):
    """Integer lifecycle states for a Project.

    The values are persisted in Project.status, so the numbering must
    never change.
    """
    VISIBLE, HIDDEN, PENDING_DELETION, DELETION_IN_PROGRESS = range(4)
class ProjectManager(BaseManager):
    # TODO(dcramer): we might want to cache this per user
    def get_for_user(self, team, user, _skip_team_check=False):
        """Return the visible projects on *team* that *user* may access,
        sorted case-insensitively by name.

        With _skip_team_check the team-membership verification is
        bypassed (caller has already checked it).
        """
        from sentry.models import Team
        if not (user and user.is_authenticated()):
            return []
        if not _skip_team_check:
            accessible_teams = Team.objects.get_for_user(
                organization=team.organization,
                user=user,
            )
            try:
                # Swap in the annotated team instance from the access list.
                team = accessible_teams[accessible_teams.index(team)]
            except ValueError:
                logging.info('User does not have access to team: %s', team.id)
                return []
        visible = self.filter(
            team=team,
            status=ProjectStatus.VISIBLE,
        )
        projects = []
        for project in visible:
            # Attach the (possibly annotated) team to avoid extra queries.
            project.team = team
            projects.append(project)
        projects.sort(key=lambda p: p.name.lower())
        return projects
class Project(Model):
    """
    Projects are permission based namespaces which generally
    are the top level entry point for all data.
    """
    slug = models.SlugField(null=True)
    name = models.CharField(max_length=200)
    organization = FlexibleForeignKey('sentry.Organization')
    team = FlexibleForeignKey('sentry.Team')
    public = models.BooleanField(default=False)
    date_added = models.DateTimeField(default=timezone.now)
    # Lifecycle state; values come from ProjectStatus. Indexed because
    # most queries filter to visible projects.
    status = BoundedPositiveIntegerField(default=0, choices=(
        (ProjectStatus.VISIBLE, _('Active')),
        (ProjectStatus.PENDING_DELETION, _('Pending Deletion')),
        (ProjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
    ), db_index=True)
    # projects that were created before this field was present
    # will have their first_event field set to date_added
    first_event = models.DateTimeField(null=True)
    objects = ProjectManager(cache_fields=[
        'pk',
        'slug',
    ])
    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_project'
        unique_together = (('team', 'slug'), ('organization', 'slug'))
    __repr__ = sane_repr('team_id', 'slug')
    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.slug)
    def save(self, *args, **kwargs):
        """Save the project, generating a slug under a lock on first save."""
        if not self.slug:
            # Serialize slug generation so concurrent first saves cannot
            # race to the same slug within an organization.
            lock_key = 'slug:project'
            with Lock(lock_key):
                slugify_instance(self, self.name, organization=self.organization)
            super(Project, self).save(*args, **kwargs)
        else:
            super(Project, self).save(*args, **kwargs)
    def get_absolute_url(self):
        """Absolute URL of the project's event stream page."""
        return absolute_uri(reverse('sentry-stream', args=[
            self.organization.slug, self.slug]))
    def merge_to(self, project):
        """Move this project's groups, events and tag data into *project*,
        then delete this project.

        NOTE(review): the Group lookup below filters only on the target
        project -- presumably it was meant to also match the group being
        merged; confirm before relying on this code path.
        """
        from sentry.models import (
            Group, GroupTagValue, Event, TagValue
        )
        if not isinstance(project, Project):
            # Accept a primary key as well as an instance.
            project = Project.objects.get_from_cache(pk=project)
        for group in Group.objects.filter(project=self):
            try:
                other = Group.objects.get(
                    project=project,
                )
            except Group.DoesNotExist:
                # No counterpart: simply repoint the group and its data.
                group.update(project=project)
                for model in (Event, GroupTagValue):
                    model.objects.filter(project=self, group=group).update(project=project)
            else:
                # Counterpart exists: fold events and tag counts into it.
                Event.objects.filter(group=group).update(group=other)
                for obj in GroupTagValue.objects.filter(group=group):
                    obj2, created = GroupTagValue.objects.get_or_create(
                        project=project,
                        group=group,
                        key=obj.key,
                        value=obj.value,
                        defaults={'times_seen': obj.times_seen}
                    )
                    if not created:
                        obj2.update(times_seen=F('times_seen') + obj.times_seen)
        for fv in TagValue.objects.filter(project=self):
            # Ensure project-level tag values exist on the target, then
            # drop the originals.
            TagValue.objects.get_or_create(project=project, key=fv.key, value=fv.value)
            fv.delete()
        self.delete()
    def is_internal_project(self):
        """True when this project is one of the configured internal
        (frontend / self-monitoring) projects."""
        for value in (settings.SENTRY_FRONTEND_PROJECT, settings.SENTRY_PROJECT):
            if str(self.id) == str(value) or str(self.slug) == str(value):
                return True
        return False
    def get_tags(self, with_internal=True):
        """Return the project's tag keys, cached per instance.

        The 'tags' option takes precedence; otherwise keys come from
        TagKey, optionally excluding internal 'sentry:' keys.
        """
        from sentry.models import TagKey
        if not hasattr(self, '_tag_cache'):
            tags = self.get_option('tags', None)
            if tags is None:
                tags = [
                    t for t in TagKey.objects.all_keys(self)
                    if with_internal or not t.startswith('sentry:')
                ]
            self._tag_cache = tags
        return self._tag_cache
    # TODO: Make these a mixin
    def update_option(self, *args, **kwargs):
        from sentry.models import ProjectOption
        return ProjectOption.objects.set_value(self, *args, **kwargs)
    def get_option(self, *args, **kwargs):
        from sentry.models import ProjectOption
        return ProjectOption.objects.get_value(self, *args, **kwargs)
    def delete_option(self, *args, **kwargs):
        from sentry.models import ProjectOption
        return ProjectOption.objects.unset_value(self, *args, **kwargs)
    @property
    def member_set(self):
        """Active organization members holding an active membership on
        this project's team."""
        from sentry.models import OrganizationMember
        return self.organization.member_set.filter(
            id__in=OrganizationMember.objects.filter(
                organizationmemberteam__is_active=True,
                organizationmemberteam__team=self.team,
            ).values('id'),
            user__is_active=True,
        ).distinct()
    def has_access(self, user, access=None):
        """Deprecated membership/SSO access check for *user*."""
        from sentry.models import AuthIdentity, OrganizationMember
        warnings.warn('Project.has_access is deprecated.', DeprecationWarning)
        queryset = self.member_set.filter(user=user)
        if access is not None:
            queryset = queryset.filter(type__lte=access)
        try:
            member = queryset.get()
        except OrganizationMember.DoesNotExist:
            return False
        try:
            auth_identity = AuthIdentity.objects.get(
                auth_provider__organization=self.organization_id,
                user=member.user_id,
            )
        except AuthIdentity.DoesNotExist:
            # No SSO configured for this member; membership suffices.
            return True
        return auth_identity.is_valid(member)
    def get_audit_log_data(self):
        """Snapshot of the fields recorded in audit log entries."""
        return {
            'id': self.id,
            'slug': self.slug,
            'name': self.name,
            'status': self.status,
            'public': self.public,
        }
    def get_full_name(self):
        """Team-qualified display name, unless the project name already
        contains the team name."""
        if self.team.name not in self.name:
            return '%s %s' % (self.team.name, self.name)
        return self.name
| imankulov/sentry | src/sentry/models/project.py | Python | bsd-3-clause | 7,758 |
from bs4 import BeautifulSoup
import httplib, codecs, datetime
import cPickle as pickle
import time
def stan_tag(criteria, server):
    # Python 2 module (print statements, httplib, cPickle).
    # POS-tags/lemmatizes each criteria line via a local Stanford CoreNLP
    # HTTP server, checkpointing results to pickle files every 50k lines.
    # NOTE(review): the [2250000:] slice and file_count = 47 resume a
    # previously interrupted run -- adjust both before a fresh run.
    tagged = []
    file_count = 47
    for ix, c in enumerate(criteria[2250000:]):
        # initialize list of sentences
        sents = []
        try:
            # send text to server
            server.request('', c)
            res = BeautifulSoup(server.getresponse().read())
            # loop through sentences to generate lists of tagged/lemmatized tuples
            for sentence in res.findAll('sentence'):
                sent_tag = []
                for word in sentence.findAll('word'):
                    sent_tag.append((word.get_text(), word['pos'], word['lemma']))
                sents.append(sent_tag)
        except:
            # On any failure: log, reconnect, and keep the raw line so the
            # output stays aligned with the input.
            print c
            print ix
            server = httplib.HTTPConnection('127.0.0.1:2020')
            sents.append(c)
        # add sentence to tagged list
        tagged.append(sents)
        #save every 50,000 lines
        if ix % 50000 == 0:
            print 'Line: ', ix
            print 'File: ', file_count
            print
            pickle.dump(tagged, open('data/stanford_tagged/stanford_tagged_criteria_%d.pkl' % (file_count), 'wb'))
            file_count += 1
            del tagged
            tagged = []
    pickle.dump(tagged, open('data/stanford_tagged/stanford_tagged_criteria_%d.pkl' % (file_count), 'wb'))
    print 'Complete'
def main():
    # Entry point: connect to the local CoreNLP HTTP server and tag all
    # criteria sentences from the CSV list.
    server = httplib.HTTPConnection('127.0.0.1:2020')
    criteria = codecs.open('data/stanford_sentence_list.csv','r').readlines()
    stan_tag(criteria, server)
if __name__ == '__main__':
    main()
| jasonost/clinicaltrials | trial_criteria/StanfordPOStagging.py | Python | mit | 1,663 |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.core.urlresolvers import reverse
from django.test import TestCase
from .user_agents import USER_AGENTS
from mob.middleware import MobileDetectorMiddleware, MobileTemplateMiddleware
from django.test.client import Client
def request(ua):
    """Build a minimal HttpRequest stand-in carrying only the UA header."""
    meta = {'HTTP_USER_AGENT': ua}
    return type('HttpRequest', (object,), {'META': meta})()
def response():
    """Build a minimal HttpResponse stand-in with a single template name."""
    attrs = {'template_name': ['test_template.html']}
    return type('HttpResponse', (object,), attrs)()
class UserAgentTest(TestCase):
    """Exercise the mobile-detection and mobile-template middleware
    against a corpus of known mobile user-agent strings."""
    def test_mobile_detector_middleware(self):
        # Every UA in the corpus must be flagged as mobile.
        detector = MobileDetectorMiddleware()
        for ua in USER_AGENTS:
            req = request(ua)
            detector.process_request(req)
            self.assertTrue(req.is_mobile, ua)
    def test_mobile_template_middleware(self):
        detector = MobileDetectorMiddleware()
        extender = MobileTemplateMiddleware()
        for ua in USER_AGENTS:
            req = request(ua)
            resp = response()
            self.assertEqual(1, len(resp.template_name))
            detector.process_request(req)
            extender.process_template_response(req, resp)
            # Mobile template variants are prepended; the original
            # template remains last as the fallback.
            self.assertEqual(3, len(resp.template_name), resp.template_name)
            self.assertTrue('mobile' in resp.template_name[0])
            self.assertTrue('mobile' in resp.template_name[1])
            self.assertFalse('mobile' in resp.template_name[2])
    def test_mobile_attributes(self):
        # Device slug should be derived from the UA platform token.
        detector = MobileDetectorMiddleware()
        iphone, ipod, ipad = map(lambda line: line.strip(), """Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; fr-fr) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5
Mozilla/5.0 (iPod; U; CPU iPhone OS 4_2_1 like Mac OS X; he-il) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5
Mozilla/5.0 (iPad; U; CPU OS 3_2_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B500 Safari/53""".split('\n'))
        req = request(iphone)
        detector.process_request(req)
        self.assertEqual('iphone', req.mobile.slug)
        req = request(ipod)
        detector.process_request(req)
        self.assertEqual('ipod', req.mobile.slug)
        req = request(ipad)
        detector.process_request(req)
        self.assertEqual('ipad', req.mobile.slug)
    def test_mobile_session_key(self):
        # Toggling mob:on / mob:off switches between full and mobile sites.
        client = Client()
        ua = {'HTTP_USER_AGENT': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; fr-fr) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5' }
        resp = client.get('/', **ua)
        self.assertTrue('This is the mobile website.' in resp.content, resp.content)
        resp = client.get(reverse('mob:on'), follow=True, **ua)
        self.assertTrue('This is the full website.' in resp.content, resp.content)
        resp = client.get(reverse('mob:off'), follow=True, **ua)
        self.assertTrue('This is the mobile website.' in resp.content, resp.content)
| caffeinehit/django-mob | tests/app/tests.py | Python | mit | 3,198 |
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``choropleth.stream.maxpoints``."""

    def __init__(
        self, plotly_name="maxpoints", parent_name="choropleth.stream", **kwargs
    ):
        # Defaults mirror the Plotly schema; callers may override any of
        # them through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        upper = kwargs.pop("max", 10000)
        lower = kwargs.pop("min", 0)
        super(MaxpointsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/choropleth/stream/_maxpoints.py | Python | mit | 506 |
from django.contrib import admin
from bcmon.models import *
from lib.admin.actions import export_as_csv_action
#class ChannelInline(admin.TabularInline):
# model = Channel
class PlayoutAdmin(admin.ModelAdmin):
    """Admin for Playout records."""
    list_display = ('title', 'time_start', 'time_end', 'channel', 'status', 'score', 'dummy_result', 'media')
    list_filter = ('channel', 'status', 'score',)
    # Fingerprint/analysis fields are produced by the matching pipeline
    # and must not be edited by hand.
    readonly_fields = ('created', 'updated', 'uuid', 'enmfp', 'analyzer_data', 'echoprintfp', 'echoprint_data', 'score', 'time_start', 'time_end')
    date_hierarchy = 'created'
    inlines = []
    actions = [export_as_csv_action("CSV Export", fields=['title', 'created', 'channel'])]
class ChannelAdmin(admin.ModelAdmin):
    """Admin for broadcast channels."""
    list_display = ('name', 'slug', 'stream_url', )
    readonly_fields = ('created', 'updated', 'uuid', 'slug', )
# Register both models with the default admin site.
admin.site.register(Playout, PlayoutAdmin)
admin.site.register(Channel, ChannelAdmin)
| hzlf/openbroadcast | website/apps/bcmon/admin.py | Python | gpl-3.0 | 965 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-11 16:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling the 0008 and 0011 branches; it applies
    no schema changes of its own."""
    dependencies = [
        ('restaurant', '0008_auto_20170205_0020'),
        ('restaurant', '0011_auto_20170207_1430'),
    ]
    operations = [
    ]
| midhun3112/restaurant_locator | Restaurant_Finder_App/restaurant_finder_app/restaurant_finder_app/restaurant/migrations/0012_merge_20170211_1641.py | Python | apache-2.0 | 344 |
from Database.Controllers.Campus import Campus
class Departamento(object):
    """Department record backed by a DB row.

    Exposes Java-style accessors to match the other Database models.
    """

    def __init__(self, dados=None):
        # Hydrate attributes from a row dict when one is provided.
        if dados is not None:
            for campo in ('id', 'nome', 'codigo', 'sigla', 'id_campus'):
                setattr(self, campo, dados[campo])

    def getId(self):
        return self.id

    def setNome(self, nome):
        self.nome = nome

    def getNome(self):
        return self.nome

    def setCodigo(self, codigo):
        self.codigo = codigo

    def getCodigo(self):
        return self.codigo

    def setSigla(self, sigla):
        self.sigla = sigla

    def getSigla(self):
        return self.sigla

    def setId_campus(self, id_campus):
        self.id_campus = id_campus

    def getId_campus(self):
        return self.id_campus

    def getCampus(self):
        # Resolve the owning campus lazily and return its display name.
        campus = Campus().pegarCampus('id = %s', (self.id_campus,))
        return campus.getNome()
| AEDA-Solutions/matweb | backend/Database/Models/Departamento.py | Python | mit | 830 |
"""
DWF Python Example 1
Author: Tobias Badertscher based on work by Digilent, Inc.
Revision: 12/31/2013
Requires:
Python 2.7
"""
import sys
sys.path.append('..')
from pydwf import dwf
def Device_Enumeration():
    # Python 2 example script: enumerate attached Digilent devices and
    # print their analog-input capabilities.
    #print DWF version
    version = dwf.GetVersion()
    print "DWF Version: "+version
    #enumerate and print device information
    cdevices = dwf.Enum(0)
    print "Number of Devices: "+str(cdevices)
    for i in range(0, cdevices):
        devicename = dwf.EnumDeviceName (i)
        serialnum = dwf.EnumSN (i)
        print "------------------------------"
        print "Device %d :" % ( i)
        print "\t" + devicename
        print "\t" + serialnum
        IsInUse =dwf.EnumDeviceIsOpened(i)
        if not IsInUse:
            # Only open devices not already claimed by another client.
            hdwf = dwf.DeviceOpen(i)
            channel = dwf.AnalogInChannelCount(hdwf)
            hzfreq = dwf.AnalogInFrequencyInfo(hdwf)
            print "\tAnalog input channels: "+str(channel)
            print "\tMax freq: "+str(hzfreq)
            dwf.DeviceClose(hdwf)
            hdwf = -1
    # ensure all devices are closed
    dwf.DeviceCloseAll()
if __name__ == '__main__':
    Device_Enumeration()
| tobbad/pydwf | digilent_samples/Device_Enumeration.py | Python | lgpl-3.0 | 1,233 |
import os
import json
import re
from BeautifulSoup import BeautifulSoup
from psrd.rules import write_rules
from psrd.files import char_replace
from psrd.universal import parse_universal
from psrd.sections import ability_pass, is_anonymous_section, has_subsections, entity_pass, quote_pass
def parse_attr_line(text):
	"""Parse a skill heading such as ``"Acrobatics (Dex; Armor Check Penalty)"``.

	Returns a 3-tuple ``(attribute, armor_check_penalty, trained_only)``:
	the text before the first "; " inside the parentheses (None when no
	parenthesised group is present) plus boolean flags for the
	"Armor Check Penalty" and "Trained Only" markers.
	"""
	attr = None
	armor_check = False
	trained = False
	# Raw string literal: '\(' in a plain string is an invalid escape
	# sequence (DeprecationWarning on Python 3.6+). The greedy '.*'
	# deliberately spans to the last ')' on the line.
	m = re.search(r'\((.*)\)', text)
	if m:
		parts = m.group(1).split("; ")
		attr = parts.pop(0)
		for part in parts:
			if part.lower() == 'armor check penalty':
				armor_check = True
			if part.lower() == 'trained only':
				trained = True
	return attr, armor_check, trained
def skill_pass(skill):
	"""Flatten the parsed skill in place: lift the first section's text
	into a plain-text description and decode its heading line into the
	attribute / armor-check / trained-only fields."""
	heading = skill['sections'][0]
	skill['type'] = 'skill'
	skill['sections'] = heading['sections']
	# Strip markup, keeping only the text nodes.
	soup = BeautifulSoup(heading['text'])
	skill['description'] = ''.join(soup.findAll(text=True))
	attribute, armor_check, trained_only = parse_attr_line(heading['name'])
	skill['attribute'] = attribute
	skill['armor_check_penalty'] = armor_check
	skill['trained_only'] = trained_only
def parse_skills(filename, output, book):
	# Run the parse pipeline for one skill file and write the result as
	# JSON under the book's skills directory. (Python 2: print statement.)
	skill = parse_universal(filename, output, book)
	skill = quote_pass(skill)
	skill = entity_pass(skill)
	skill_pass(skill)
	print "%s: %s" %(skill['source'], skill['name'])
	filename = create_skill_filename(output, book, skill)
	fp = open(filename, 'w')
	json.dump(skill, fp, indent=4)
	fp.close()
def create_skill_filename(output, book, skill):
	"""Build the absolute JSON output path for *skill* under *book*."""
	relative = "%s/skills/%s" % (char_replace(book), char_replace(skill['name']))
	return os.path.abspath("%s/%s.json" % (output, relative))
| devonjones/PSRD-Parser | src/psrd/skills.py | Python | gpl-3.0 | 1,523 |
"""INSTEON Device Type Dimmable Lighting Control Module."""
from insteonplm.devices import Device
from insteonplm.states.dimmable import (
DimmableSwitch,
DimmableSwitch_Fan,
DimmableKeypadA,
)
from insteonplm.states.onOff import OnOffKeypad, OnOffKeypadLed
class DimmableLightingControl(Device):
    """Dimmable Lighting Controller.

    INSTEON On/Off switch device class. Available device control options:
    - light_on(onlevel=0xff) / light_on_fast(onlevel=0xff)
    - light_off() / light_off_fast()

    Subscribe to lightOnLevel.connect(callback) to monitor state, where
    callback is callback(self, device_id, state, state_value).
    """

    def __init__(self, plm, address, cat, subcat, product_key=None,
                 description=None, model=None):
        """Init the DimmableLightingControl Class."""
        Device.__init__(self, plm, address, cat, subcat,
                        product_key, description, model)
        # Group 0x01 carries the single dimmable-switch state.
        self._stateList[0x01] = DimmableSwitch(
            self._address, "lightOnLevel", 0x01,
            self._send_msg, self._message_callbacks, 0x00)
class DimmableLightingControl_2475F(DimmableLightingControl):
    """FanLinc model 2475F Dimmable Lighting Control.
    Device Class 0x01 subcat 0x2e
    Two separate INSTEON On/Off switch devices are created with ID
    1) Light
    - ID: xxxxxx (where xxxxxx is the Insteon address of the device)
    - Controls:
    - light_on(onlevel=0xff)
    - light_on_fast(onlevel=0xff)
    - light_off()
    - light_off_fast()
    - Monitor: lightOnLevel.connect(callback)
    2) Fan
    - ID: xxxxxx_2 (where xxxxxx is the Insteon address of the device)
    - Controls:
    - fan_on(onlevel=0xff)
    - fan_off()
    - light_on(onlevel=0xff) - Same as fan_on(onlevel=0xff)
    - light_off() - Same as fan_off()
    - Monitor: fanSpeed.connect(callback)
    where callback defined as:
    - callback(self, device_id, state, state_value)
    """
    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl_2475F Class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # Group 0x01: dimmable light.
        self._stateList[0x01] = DimmableSwitch(
            self._address,
            "lightOnLevel",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        # Group 0x02: fan speed, exposed as a second dimmable switch.
        self._stateList[0x02] = DimmableSwitch_Fan(
            self._address,
            "fanOnLevel",
            0x02,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
class DimmableLightingControl_2334_222(Device):
    """Dimmable KeypadLinc Lighting Control (model 2334-222 family).

    Registers the keypad LED state handler and the main (group 0x01)
    dimmable button; subclasses add the remaining buttons via
    :meth:`_add_buttons`.
    """

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl_2334_222 device class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # Shared LED state object; group 0x00 so it does not collide with
        # any button group.  Individual buttons register for LED updates
        # through this object (see _add_buttons).
        self._leds = OnOffKeypadLed(
            self._address,
            "keypadLEDs",
            0x00,
            self._send_msg,
            self._message_callbacks,
            0x00,
            self._plm.loop,
        )
        # Group 0x01 is the main dimmable load button ("A").
        self._stateList[0x01] = DimmableKeypadA(
            self._address,
            "keypadButtonMain",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
            self._leds,
        )

    def _add_buttons(self, button_list):
        # button_list maps group number -> button letter suffix, e.g.
        # {2: "B", 3: "C"} creates states keypadButtonB, keypadButtonC.
        for group in button_list:
            self._stateList[group] = OnOffKeypad(
                self._address,
                "keypadButton{}".format(button_list[group]),
                group,
                self._send_msg,
                self._message_callbacks,
                0x00,
                self._plm.loop,
                self._leds,
            )
            # Keep each button's LED in sync with the shared LED state.
            self._leds.register_led_updates(self._stateList[group].led_changed, group)
class DimmableLightingControl_2334_222_8(DimmableLightingControl_2334_222):
    """Dimmable 8 Button KeypadLinc Switched Lighting Control."""

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl_2334_222_8 device class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # Button "A" (group 1) is the main load created by the base class;
        # the remaining seven buttons are groups 2-8.
        button_list = {2: "B", 3: "C", 4: "D", 5: "E", 6: "F", 7: "G", 8: "H"}
        self._add_buttons(button_list)
class DimmableLightingControl_2334_222_6(DimmableLightingControl_2334_222):
    """Dimmable 6 Button KeypadLinc Switched Lighting Control."""

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl_2334_222_6 device class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # In 6-button layout the physical buttons map to groups 3-6
        # (labelled A-D here); the main on/off pair uses group 1 from the
        # base class.
        button_list = {3: "A", 4: "B", 5: "C", 6: "D"}
        self._add_buttons(button_list)
| nugget/python-insteonplm | insteonplm/devices/dimmableLightingControl.py | Python | mit | 5,358 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; specifically version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# This code was inspired in the autotest project,
# client/shared/test.py
# Authors: Martin J Bligh <mbligh@google.com>,
# Andy Whitcroft <apw@shadowen.org>
"""
Contains the base test implementation, used as a base for the actual
framework tests.
"""
import inspect
import logging
import os
import re
import shutil
import sys
import time
from . import data_dir
from . import exceptions
from . import multiplexer
from . import sysinfo
from ..utils import asset
from ..utils import astring
from ..utils import data_structures
from ..utils import genio
from ..utils import path as utils_path
from ..utils import process
from ..utils import stacktrace
from .settings import settings
from .version import VERSION
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
class NameNotTestNameError(Exception):
    """
    The given test name is not a TestName instance.

    With the introduction of :class:`avocado.core.test.TestName`, it's
    not allowed to use other types as the ``name`` parameter to a test
    instance. This exception is raised when this is attempted (see
    :meth:`Test.__init__`).
    """
class TestName(object):
    """
    Test name representation: ``<uid>-<name>[;<variant>]``.
    """
    def __init__(self, uid, name, variant=None, no_digits=None):
        """
        Test name according to avocado specification
        :param uid: unique test id (within the job)
        :param name: test name (identifies the executed test)
        :param variant: variant id
        :param no_digits: number of digits of the test uid
        """
        self.uid = uid
        # NOTE(review): ``None >= 0`` only works on Python 2 (evaluates
        # False, taking the else branch); on Python 3 it raises TypeError.
        # This module still targets Python 2 (see ``basestring`` below) --
        # confirm before porting.
        if no_digits >= 0:
            # no_digits == 0 falls through to the default 3-digit padding
            self.str_uid = str(uid).zfill(no_digits if no_digits else 3)
        else:
            self.str_uid = str(uid)
        self.name = name or "<unknown>"
        self.variant = variant
        # Rendered variant suffix, e.g. ";variant1"; empty when no variant.
        self.str_variant = "" if variant is None else ";" + str(variant)
    def __str__(self):
        return "%s-%s%s" % (self.str_uid, self.name, self.str_variant)
    def __repr__(self):
        return repr(str(self))
    def __eq__(self, other):
        # Compare against plain strings by rendered form, against other
        # TestName objects attribute-by-attribute.
        # NOTE(review): ``basestring`` is Python-2-only; raises NameError
        # on Python 3.
        if isinstance(other, basestring):
            return str(self) == other
        else:
            return self.__dict__ == other.__dict__
    def str_filesystem(self):
        """
        File-system friendly representation of the test name

        Tries, in this order, to return: the full safe name; the full uid
        plus (safe) variant; the full uid plus variant.  Raises
        AssertionError when even the uid alone does not fit.
        """
        name = str(self)
        fsname = astring.string_to_safe_path(name)
        if len(name) == len(fsname):    # everything fits in
            return fsname
        # 001-mytest;foo
        # 001-mytest;f
        # 001-myte;foo
        idx_fit_variant = len(fsname) - len(self.str_variant)
        if idx_fit_variant > len(self.str_uid):     # full uid+variant
            return (fsname[:idx_fit_variant] +
                    astring.string_to_safe_path(self.str_variant))
        elif len(self.str_uid) <= len(fsname):  # full uid
            return astring.string_to_safe_path(self.str_uid + self.str_variant)
        else:       # not even uid could be stored in fs
            raise AssertionError("Test uid is too long to be stored on the "
                                 "filesystem: %s\nFull test name is %s"
                                 % (self.str_uid, str(self)))
class Test(unittest.TestCase):
    """
    Base implementation for the test class.

    You'll inherit from this to write your own tests. Typically you'll want
    to implement setUp(), test*() and tearDown() methods on your own tests.
    """
    #: Class-level default parameters; used as the lowest-priority layer
    #: of the AvocadoParams built in __init__.
    default_params = {}

    def __init__(self, methodName='test', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests,
                     written with the avocado API, this should not be
                     set.  This is reserved for internal Avocado use,
                     such as when running random executables as tests.
        :type name: :class:`avocado.core.test.TestName`
        :param params: test parameters: a dict (merged over
                       ``default_params``), a ``(params, mux_path)`` tuple,
                       or None.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.data_dir.create_job_logs_dir`.
        :param job: The job that this test is part of.
        :param runner_queue: queue used to report the test state back to
                             the test runner process (see report_state()).
        :raises: :class:`avocado.core.test.NameNotTestNameError`
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            # Remembers that warning() was used at least once so that
            # _run_avocado() can turn a PASS into a TestWarn at the end.
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)
        if name is not None:
            if not isinstance(name, TestName):
                raise NameNotTestNameError(name)
            self.name = name
        else:
            self.name = TestName(0, self.__class__.__name__)
        self.job = job
        # Expected-output reference files only exist when the test has a
        # data directory (i.e. when it is backed by a file; see datadir).
        if self.datadir is None:
            self._expected_stdout_file = None
            self._expected_stderr_file = None
        else:
            self._expected_stdout_file = os.path.join(self.datadir,
                                                      'stdout.expected')
            self._expected_stderr_file = os.path.join(self.datadir,
                                                      'stderr.expected')
        if base_logdir is None:
            base_logdir = data_dir.create_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        logdir = os.path.join(base_logdir, self.name.str_filesystem())
        if os.path.exists(logdir):
            raise exceptions.TestSetupFail("Log dir already exists, this "
                                           "should never happen: %s"
                                           % logdir)
        self.logdir = utils_path.init_dir(logdir)
        # Replace '/' with '_' to avoid splitting name into multiple dirs
        genio.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')
        self._ssh_logfile = os.path.join(self.logdir, 'remote.log')
        self._stdout_file = os.path.join(self.logdir, 'stdout')
        self._stderr_file = os.path.join(self.logdir, 'stderr')
        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)
        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        # NOTE: this patches the shared "avocado.test" logger in place, so
        # the wrapper persists on the logger object beyond this instance.
        self.log.warn = self.log.warning = record_and_warn
        mux_path = ['/test/*']
        if isinstance(params, dict):
            # Shadow the class attribute with an instance copy so class
            # level defaults are not mutated.
            self.default_params = self.default_params.copy()
            self.default_params.update(params)
            params = []
        elif params is None:
            params = []
        elif isinstance(params, tuple):
            params, mux_path = params[0], params[1]
        self.params = multiplexer.AvocadoParams(params, self.name,
                                                mux_path,
                                                self.default_params)
        # A "timeout" param overrides any class-level ``timeout`` attribute.
        default_timeout = getattr(self, "timeout", None)
        self.timeout = self.params.get("timeout", default=default_timeout)
        self.log.info('START %s', self.name)
        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None
        self.whiteboard = ''
        self.running = False
        self.time_start = -1
        self.time_end = -1
        self.paused = False
        self.paused_msg = ''
        self.runner_queue = runner_queue
        self.time_elapsed = -1
        unittest.TestCase.__init__(self, methodName=methodName)

    @property
    def basedir(self):
        """
        The directory where this test (when backed by a file) is located at
        """
        if self.filename is not None:
            return os.path.dirname(self.filename)
        else:
            return None

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files
        """
        # Maximal allowed file name length is 255
        if (self.filename is not None and
                len(os.path.basename(self.filename)) < 251):
            return self.filename + '.data'
        else:
            return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test
        """
        possibly_compiled = inspect.getfile(self.__class__)
        # Map .pyc/.pyo back to the .py source file.
        if possibly_compiled.endswith('.pyc') or possibly_compiled.endswith('.pyo'):
            source = possibly_compiled[:-1]
        else:
            source = possibly_compiled
        if os.path.exists(source):
            return source
        else:
            return None

    @data_structures.LazyProperty
    def workdir(self):
        """Per-test temporary work directory (created lazily)."""
        basename = (os.path.basename(self.logdir).replace(':', '_')
                    .replace(';', '_'))
        return utils_path.init_dir(data_dir.get_tmp_dir(), basename)

    @data_structures.LazyProperty
    def srcdir(self):
        """Directory for third-party sources built by the test (lazy)."""
        return utils_path.init_dir(self.workdir, 'src')

    @data_structures.LazyProperty
    def cache_dirs(self):
        """
        Returns a list of cache directories as set in config file.
        """
        cache_dirs = settings.get_value('datadir.paths', 'cache_dirs',
                                        key_type=list, default=[])
        datadir_cache = os.path.join(data_dir.get_data_dir(), 'cache')
        if datadir_cache not in cache_dirs:
            cache_dirs.append(datadir_cache)
        return cache_dirs

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return "Test(%r)" % self.name

    def _tag_start(self):
        """Mark the test as running and record the start timestamp."""
        self.running = True
        self.time_start = time.time()

    def _tag_end(self):
        """Mark the test as finished and record the end timestamp."""
        self.running = False
        self.time_end = time.time()
        # for consistency sake, always use the same stupid method
        self._update_time_elapsed(self.time_end)

    def _update_time_elapsed(self, current_time=None):
        if current_time is None:
            current_time = time.time()
        self.time_elapsed = current_time - self.time_start

    def report_state(self):
        """
        Send the current test state to the test runner process
        """
        if self.runner_queue is not None:
            self.runner_queue.put(self.get_state())

    def get_state(self):
        """
        Serialize selected attributes representing the test state

        :returns: a dictionary containing relevant test state data
        :rtype: dict
        """
        if self.running and self.time_start:
            self._update_time_elapsed()
        preserve_attr = ['basedir', 'debugdir', 'depsdir', 'fail_reason',
                         'logdir', 'logfile', 'name', 'resultsdir', 'srcdir',
                         'status', 'sysinfodir', 'text_output', 'time_elapsed',
                         'traceback', 'workdir', 'whiteboard', 'time_start',
                         'time_end', 'running', 'paused', 'paused_msg',
                         'fail_class', 'params', "timeout"]
        # Uses __dict__.get() (not getattr) so lazy properties that were
        # never evaluated serialize as None instead of being triggered.
        state = dict([(key, self.__dict__.get(key)) for key in preserve_attr])
        state['class_name'] = self.__class__.__name__
        state['job_logdir'] = self.job.logdir
        state['job_unique_id'] = self.job.unique_id
        return state

    def _register_log_file_handler(self, logger, formatter, filename,
                                   log_level=logging.DEBUG):
        """Attach a file handler to ``logger`` and return it."""
        file_handler = logging.FileHandler(filename=filename)
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        return file_handler

    def _start_logging(self):
        """
        Simple helper for adding a file logger to the root logger.
        """
        self.file_handler = logging.FileHandler(filename=self.logfile)
        self.file_handler.setLevel(logging.DEBUG)
        fmt = '%(asctime)s %(levelname)-5.5s| %(message)s'
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
        self.file_handler.setFormatter(formatter)
        self.log.addHandler(self.file_handler)
        # Raw (message-only) capture of the command's stdout/stderr streams.
        stream_fmt = '%(message)s'
        stream_formatter = logging.Formatter(fmt=stream_fmt)
        self._register_log_file_handler(logging.getLogger("avocado.test.stdout"),
                                        stream_formatter,
                                        self._stdout_file)
        self._register_log_file_handler(logging.getLogger("avocado.test.stderr"),
                                        stream_formatter,
                                        self._stderr_file)
        self._ssh_fh = self._register_log_file_handler(logging.getLogger('paramiko'),
                                                       formatter,
                                                       self._ssh_logfile)

    def _stop_logging(self):
        """
        Stop the logging activity of the test by cleaning the logger handlers.
        """
        self.log.removeHandler(self.file_handler)
        logging.getLogger('paramiko').removeHandler(self._ssh_fh)

    def _record_reference_stdout(self):
        """Save the produced stdout as the expected-output reference."""
        if self.datadir is not None:
            utils_path.init_dir(self.datadir)
            shutil.copyfile(self._stdout_file, self._expected_stdout_file)

    def _record_reference_stderr(self):
        """Save the produced stderr as the expected-output reference."""
        if self.datadir is not None:
            utils_path.init_dir(self.datadir)
            shutil.copyfile(self._stderr_file, self._expected_stderr_file)

    def _check_reference_stdout(self):
        """Assert recorded stdout matches the reference file, if present."""
        if (self._expected_stdout_file is not None and
                os.path.isfile(self._expected_stdout_file)):
            expected = genio.read_file(self._expected_stdout_file)
            actual = genio.read_file(self._stdout_file)
            # Fix: message used to misspell "stdout" as "sdtout".
            msg = ('Actual test stdout differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def _check_reference_stderr(self):
        """Assert recorded stderr matches the reference file, if present."""
        if (self._expected_stderr_file is not None and
                os.path.isfile(self._expected_stderr_file)):
            expected = genio.read_file(self._expected_stderr_file)
            actual = genio.read_file(self._stderr_file)
            # Fix: message used to misspell "stderr" as "sdterr".
            msg = ('Actual test stderr differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def _run_avocado(self):
        """
        Auxiliary method to run_avocado.

        Runs setUp/test/tearDown, the output reference checks and raises
        the first relevant exception (test > cleanup > stdout > stderr >
        warning) so run_avocado() can map it to a status.
        """
        testMethod = getattr(self, self._testMethodName)
        self._start_logging()
        self.sysinfo_logger.start_test_hook()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        # Phase 1: setUp.  Skips are only legal here; any other failure is
        # a setup failure.
        try:
            self.setUp()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
        except exceptions.TestTimeoutSkip as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestTimeoutSkip(details)
        except:  # Old-style exceptions are not inherited from Exception()
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            details = sys.exc_info()[1]
            raise exceptions.TestSetupFail(details)
        # Phase 2: the test method itself; exceptions are recorded and
        # re-raised only after tearDown ran.
        try:
            testMethod()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            skip_illegal_msg = ('Calling skip() in places other than '
                                'setUp() is not allowed in avocado, you '
                                'must fix your test. Original skip exception: '
                                '%s' % details)
            raise exceptions.TestError(skip_illegal_msg)
        except:  # Old-style exceptions are not inherited from Exception()
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            details = sys.exc_info()[1]
            if not isinstance(details, Exception):  # Avoid passing nasty exc
                details = exceptions.TestError("%r: %s" % (details, details))
            test_exception = details
        finally:
            # Phase 3: tearDown always runs, even when the test failed.
            try:
                self.tearDown()
            except exceptions.TestSkipError as details:
                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                skip_illegal_msg = ('Calling skip() in places other than '
                                    'setUp() is not allowed in avocado, '
                                    'you must fix your test. Original skip '
                                    'exception: %s' % details)
                raise exceptions.TestError(skip_illegal_msg)
            except:  # avoid old-style exception failures
                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                details = sys.exc_info()[1]
                cleanup_exception = exceptions.TestSetupFail(details)
        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        genio.write_file(whiteboard_file, self.whiteboard)
        # Phase 4: output reference checking / recording, depending on the
        # job's output_check_record and output_check settings.
        if self.job is not None:
            job_standalone = getattr(self.job.args, 'standalone', False)
            output_check_record = getattr(self.job.args,
                                          'output_check_record', 'none')
            no_record_mode = (not job_standalone and
                              output_check_record == 'none')
            disable_output_check = (not job_standalone and
                                    getattr(self.job.args,
                                            'output_check', 'on') == 'off')
            if job_standalone or no_record_mode:
                if not disable_output_check:
                    try:
                        self._check_reference_stdout()
                    except Exception as details:
                        stacktrace.log_exc_info(sys.exc_info(),
                                                logger='avocado.test')
                        stdout_check_exception = details
                    try:
                        self._check_reference_stderr()
                    except Exception as details:
                        stacktrace.log_exc_info(sys.exc_info(),
                                                logger='avocado.test')
                        stderr_check_exception = details
            elif not job_standalone:
                if output_check_record in ['all', 'stdout']:
                    self._record_reference_stdout()
                if output_check_record in ['all', 'stderr']:
                    self._record_reference_stderr()
        # Phase 5: raise the first recorded problem, in priority order.
        # pylint: disable=E0702
        if test_exception is not None:
            raise test_exception
        elif cleanup_exception is not None:
            raise cleanup_exception
        elif stdout_check_exception is not None:
            raise stdout_check_exception
        elif stderr_check_exception is not None:
            raise stderr_check_exception
        elif self.__log_warn_used:
            raise exceptions.TestWarn("Test passed but there were warnings "
                                      "during execution. Check the log for "
                                      "details.")
        self.status = 'PASS'
        self.sysinfo_logger.end_test_hook()

    def _setup_environment_variables(self):
        """Export the AVOCADO_* environment variables for the test."""
        os.environ['AVOCADO_VERSION'] = VERSION
        if self.basedir is not None:
            os.environ['AVOCADO_TEST_BASEDIR'] = self.basedir
        if self.datadir is not None:
            os.environ['AVOCADO_TEST_DATADIR'] = self.datadir
        os.environ['AVOCADO_TEST_WORKDIR'] = self.workdir
        os.environ['AVOCADO_TEST_SRCDIR'] = self.srcdir
        os.environ['AVOCADO_TEST_LOGDIR'] = self.logdir
        os.environ['AVOCADO_TEST_LOGFILE'] = self.logfile
        os.environ['AVOCADO_TEST_OUTPUTDIR'] = self.outputdir
        os.environ['AVOCADO_TEST_SYSINFODIR'] = self.sysinfodir

    def run_avocado(self):
        """
        Wraps the run method, for execution inside the avocado runner.

        Maps raised exceptions onto the test status (SKIP/FAIL/ERROR/...)
        and always collects timing, the textual log output and the final
        report.

        :result: Unused param, compatibility with :class:`unittest.TestCase`.
        """
        self._setup_environment_variables()
        try:
            self._tag_start()
            self._run_avocado()
        except exceptions.TestBaseException as detail:
            self.status = detail.status
            self.fail_class = detail.__class__.__name__
            self.fail_reason = detail
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
        except AssertionError as detail:
            self.status = 'FAIL'
            self.fail_class = detail.__class__.__name__
            self.fail_reason = detail
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
        except Exception as detail:
            self.status = 'ERROR'
            tb_info = stacktrace.tb_info(sys.exc_info())
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
            try:
                self.fail_class = str(detail.__class__.__name__)
                self.fail_reason = str(detail)
            except TypeError:
                self.fail_class = "Exception"
                self.fail_reason = ("Unable to get exception, check the "
                                    "traceback for details.")
            for e_line in tb_info:
                self.log.error(e_line)
        finally:
            self._tag_end()
            self._report()
            self.log.info("")
            with open(self.logfile, 'r') as log_file_obj:
                self.text_output = log_file_obj.read()
            self._stop_logging()

    def _report(self):
        """
        Report result to the logging system.
        """
        if self.fail_reason is not None:
            self.log.error("%s %s -> %s: %s", self.status,
                           self.name,
                           self.fail_class,
                           self.fail_reason)
        else:
            # No failure but also no status set means the test never
            # finished: it was interrupted.
            if self.status is None:
                self.status = 'INTERRUPTED'
            self.log.info("%s %s", self.status,
                          self.name)

    def fail(self, message=None):
        """
        Fails the currently running test.

        After calling this method a test will be terminated and have its status
        as FAIL.

        :param message: an optional message that will be recorded in the logs
        :type message: str
        """
        raise exceptions.TestFail(message)

    def error(self, message=None):
        """
        Errors the currently running test.

        After calling this method a test will be terminated and have its status
        as ERROR.

        :param message: an optional message that will be recorded in the logs
        :type message: str
        """
        raise exceptions.TestError(message)

    def skip(self, message=None):
        """
        Skips the currently running test.

        This method should only be called from a test's setUp() method, not
        anywhere else, since by definition, if a test gets to be executed, it
        can't be skipped anymore. If you call this method outside setUp(),
        avocado will mark your test status as ERROR, and instruct you to
        fix your test in the error message.

        :param message: an optional message that will be recorded in the logs
        :type message: str
        """
        raise exceptions.TestSkipError(message)

    def fetch_asset(self, name, asset_hash=None, algorithm='sha1',
                    locations=None, expire=None):
        """
        Method o call the utils.asset in order to fetch and asset file
        supporting hash check, caching and multiple locations.

        :param name: the asset filename or URL
        :param asset_hash: asset hash (optional)
        :param algorithm: hash algorithm (optional, defaults to sha1)
        :param locations: list of URLs from where the asset can be
                          fetched (optional)
        :param expire: time for the asset to expire
        :raise EnvironmentError: When it fails to fetch the asset
        :returns: asset file local path
        """
        if expire is not None:
            expire = data_structures.time_to_seconds(str(expire))
        return asset.Asset(name, asset_hash, algorithm, locations,
                           self.cache_dirs, expire).fetch()
class SimpleTest(Test):
    """
    Run an arbitrary command that returns either 0 (PASS) or !=0 (FAIL).
    """
    #: Matches avocado debug-log lines where the executed command echoed
    #: an avocado WARN record on its stdout.
    re_avocado_log = re.compile(r'^\d\d:\d\d:\d\d DEBUG\| \[stdout\]'
                                r' \d\d:\d\d:\d\d WARN \|')

    def __init__(self, name, params=None, base_logdir=None, job=None):
        """Initialize the test; the executable is the test file itself."""
        super(SimpleTest, self).__init__(name=name, params=params,
                                         base_logdir=base_logdir, job=job)
        self._command = self.filename

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test
        """
        return os.path.abspath(self.name.name)

    def _log_detailed_cmd_info(self, result):
        """
        Log detailed command information.

        :param result: :class:`avocado.utils.process.CmdResult` instance.
        """
        self.log.info("Exit status: %s", result.exit_status)
        self.log.info("Duration: %s", result.duration)

    def execute_cmd(self):
        """
        Run the executable, and log its detailed execution.

        :raises exceptions.TestFail: when the command exits non-zero.
        """
        try:
            # Test params are exported to the command via its environment.
            test_params = dict([(str(key), str(val)) for _, key, val in
                                self.params.iteritems()])
            # process.run uses shlex.split(), the self.path needs to be escaped
            result = process.run(self._command, verbose=True,
                                 env=test_params)
            self._log_detailed_cmd_info(result)
        except process.CmdError as details:
            self._log_detailed_cmd_info(details.result)
            raise exceptions.TestFail(details)

    def test(self):
        """
        Run the test and postprocess the results

        :raises exceptions.TestWarn: when the command forwarded avocado
                                     WARN lines to its stdout.
        """
        self.execute_cmd()
        # Fix: use a context manager so the log file handle is closed
        # deterministically (the previous bare open() leaked it).
        with open(self.logfile) as log_file:
            for line in log_file:
                if self.re_avocado_log.match(line):
                    raise exceptions.TestWarn("Test passed but there were warnings"
                                              " on stdout during execution. Check "
                                              "the log for details.")
class ExternalRunnerTest(SimpleTest):
    """
    Run a test by handing its name to an external runner executable.

    The command executed is ``<external_runner.runner> <test name>``.
    """
    def __init__(self, name, params=None, base_logdir=None, job=None,
                 external_runner=None):
        # Validate before initializing the base class; assertIsNotNone is
        # usable here because it is a plain method on unittest.TestCase.
        self.assertIsNotNone(external_runner, "External runner test requires "
                             "external_runner parameter, got None instead.")
        self.external_runner = external_runner
        super(ExternalRunnerTest, self).__init__(name, params, base_logdir,
                                                 job)
        self._command = external_runner.runner + " " + self.name.name
    @property
    def filename(self):
        # An external-runner test is not backed by a file of its own.
        return None
    def test(self):
        """Run the external runner, honoring its working-dir requirement."""
        pre_cwd = os.getcwd()
        new_cwd = None
        try:
            self.log.info('Running test with the external level test '
                          'runner: "%s"', self.external_runner.runner)
            # Change work directory if needed by the external runner
            # 'runner' = chdir to the runner binary's dir; 'test' = chdir
            # to the test directory; anything else = stay put.
            if self.external_runner.chdir == 'runner':
                new_cwd = os.path.dirname(self.external_runner.runner)
            elif self.external_runner.chdir == 'test':
                new_cwd = self.external_runner.test_dir
            else:
                new_cwd = None
            if new_cwd is not None:
                self.log.debug('Changing working directory to "%s" '
                               'because of external runner requirements ',
                               new_cwd)
                os.chdir(new_cwd)
            self.execute_cmd()
        finally:
            # Restore the original cwd only if we actually changed it.
            if new_cwd is not None:
                os.chdir(pre_cwd)
class MissingTest(Test):
    """
    Handle when there is no such test module in the test directory.
    """
    def test(self):
        # Always fails with TestNotFoundError; this class is a placeholder
        # instantiated by the resolver when the requested test is missing.
        e_msg = ('Test %s could not be found in the test dir %s '
                 '(or test path does not exist)' %
                 (self.name, data_dir.get_test_dir()))
        raise exceptions.TestNotFoundError(e_msg)
class NotATest(Test):
    """
    The file is not a test.

    Either a non executable python module with no avocado test class in it,
    or a regular, non executable file.
    """
    def test(self):
        # Always fails with NotATestError; placeholder used by the resolver.
        e_msg = ('File %s is not executable and does not contain an avocado '
                 'test class in it ' % self.name)
        raise exceptions.NotATestError(e_msg)
class SkipTest(Test):
    """
    Generic substitute for avocado tests which always skips during the
    setUp phase, using the ``_skip_reason`` message.
    """
    _skip_reason = "Generic skip test reason"

    def __init__(self, *args, **kwargs):
        """
        Accept any signature, but forward only the arguments supported by
        avocado.Test; extra positional/keyword arguments are ignored.
        """
        supported = ["methodName", "name", "params", "base_logdir", "job",
                     "runner_queue"]
        positional = list(args)
        forwarded = {}
        for key in supported:
            if key in kwargs:
                forwarded[key] = kwargs[key]
            elif positional:
                # Consume positionals in order for params not given by name
                forwarded[key] = positional.pop(0)
        super(SkipTest, self).__init__(**forwarded)

    def setUp(self):
        raise exceptions.TestSkipError(self._skip_reason)

    def test(self):
        """ Should not be executed """
        # setUp() always raises first, so reaching here is a logic error.
        raise RuntimeError("This should never be executed!")
class TimeOutSkipTest(SkipTest):
    """
    Skip test due job timeout.

    This test is skipped due a job timeout.
    It will never have a chance to execute.
    """
    # Only the reason message and exception type differ from SkipTest.
    _skip_reason = "Test skipped due a job timeout!"
    def setUp(self):
        raise exceptions.TestTimeoutSkip(self._skip_reason)
class DryRunTest(SkipTest):
    """
    Fake test which logs itself and reports as SKIP
    """
    _skip_reason = "Test skipped due to --dry-run"
    def setUp(self):
        # Log the resolved params before delegating to SkipTest.setUp(),
        # which raises TestSkipError.
        self.log.info("Test params:")
        # NOTE(review): iteritems() is Python-2-only; this module still
        # targets Python 2.
        for path, key, value in self.params.iteritems():
            self.log.info("%s:%s ==> %s", path, key, value)
        super(DryRunTest, self).setUp()
class ReplaySkipTest(SkipTest):
    """
    Skip test due to job replay filter.

    This test is skipped due to a job replay filter.
    It will never have a chance to execute.
    """
    # Behavior is fully inherited from SkipTest; only the message differs.
    _skip_reason = "Test skipped due to a job replay filter!"
class TestError(Test):
    """
    Generic test error.

    Placeholder test that stores an exception and reports it as an ERROR
    when executed.
    """
    def __init__(self, *args, **kwargs):
        """Extract the mandatory 'exception' kwarg, then init the base."""
        failure = kwargs.pop('exception')
        super(TestError, self).__init__(*args, **kwargs)
        self.exception = failure

    def test(self):
        """Report the stored exception as a test error."""
        self.error(self.exception)
| PandaWei/avocado | avocado/core/test.py | Python | gpl-2.0 | 31,962 |
# coding=utf-8
from Framework.Controller import Controller
from Database.Controllers.Disciplina import Disciplina as BDDisciplina
from Models.Disciplina.RespostaListar import RespostaListar
from Models.Disciplina.RespostaCadastrar import RespostaCadastrar
from Models.Disciplina.RespostaEditar import RespostaEditar
from Models.Disciplina.RespostaVer import RespostaVer
from Models.Disciplina.RespostaDeletar import RespostaDeletar
from Database.Models.Disciplina import Disciplina as ModelDisciplina
class Disciplina(Controller):
    """CRUD controller for Disciplina (course subject) records."""

    def Listar(self, pedido_listar):
        """Return a paginated listing filtered by department and name."""
        filtro_nome = "%" + pedido_listar.getNome().replace(' ', '%') + "%"
        parametros = (
            str(pedido_listar.getIdDepartamento()),
            filtro_nome,
            str(pedido_listar.getQuantidade()),
            (str(pedido_listar.getQuantidade() * pedido_listar.getPagina())),
        )
        encontradas = BDDisciplina().pegarDisciplinas(
            "WHERE id_departamento = %s AND nome LIKE %s LIMIT %s OFFSET %s",
            parametros)
        return RespostaListar(encontradas)

    def Ver(self, pedido_ver):
        """Return a single discipline looked up by its id."""
        encontrada = BDDisciplina().pegarDisciplina(
            "WHERE id = %s ", (str(pedido_ver.getId()),))
        return RespostaVer(encontrada)

    def Cadastrar(self, pedido_cadastrar):
        """Create a new discipline from the request payload."""
        nova = ModelDisciplina()
        nova.setNome(pedido_cadastrar.getNome())
        nova.setCodigo(pedido_cadastrar.getCodigo())
        nova.setId_departamento(pedido_cadastrar.getId_departamento())
        nova.setCreditos(pedido_cadastrar.getCreditos())
        return RespostaCadastrar(BDDisciplina().inserirDisciplina(nova))

    def Editar(self, pedido_editar):
        """Load an existing discipline, apply the new values and persist."""
        existente = BDDisciplina().pegarDisciplina(
            "WHERE id = %s ", (str(pedido_editar.getId()),))
        existente.setNome(pedido_editar.getNome())
        existente.setCodigo(pedido_editar.getCodigo())
        existente.setId_departamento(pedido_editar.getId_departamento())
        existente.setCreditos(pedido_editar.getCreditos())
        BDDisciplina().alterarDisciplina(existente)
        return RespostaEditar("Disciplina Editado com sucesso!")

    def Deletar(self, pedido_deletar):
        """Load an existing discipline by id and remove it."""
        existente = BDDisciplina().pegarDisciplina(
            "WHERE id = %s ", (str(pedido_deletar.getId()),))
        BDDisciplina().removerDisciplina(existente)
        return RespostaDeletar("Disciplina Removido com sucesso!")
| AEDA-Solutions/matweb | backend/Controllers/Disciplina.py | Python | mit | 2,080 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import unittest
import mock
import lxml.html
from nikola.post import get_meta
from nikola.utils import demote_headers, TranslatableSetting
class dummy(object):
    """Bare attribute container used to fake a nikola Post in the tests."""
    pass
class GetMetaTest(unittest.TestCase):
    """Tests for nikola.post.get_meta() metadata extraction."""
    def test_getting_metadata_from_content(self):
        # Metadata embedded as ".. key: value" comment lines in the post.
        file_metadata = [".. title: Nikola needs more tests!\n",
                         ".. slug: write-tests-now\n",
                         ".. date: 2012/09/15 19:52:05\n",
                         ".. tags:\n",
                         ".. link:\n",
                         ".. description:\n",
                         "Post content\n"]
        opener_mock = mock.mock_open(read_data=file_metadata)
        # mock_open does not stub readlines() by itself; provide it here.
        opener_mock.return_value.readlines.return_value = file_metadata
        post = dummy()
        post.source_path = 'file_with_metadata'
        post.metadata_path = 'file_with_metadata.meta'
        with mock.patch('nikola.post.io.open', opener_mock, create=True):
            meta, nsm = get_meta(post)
        self.assertEqual('Nikola needs more tests!', meta['title'])
        self.assertEqual('write-tests-now', meta['slug'])
        self.assertEqual('2012/09/15 19:52:05', meta['date'])
        # Keys with empty values must not appear in the result at all.
        self.assertFalse('tags' in meta)
        self.assertFalse('link' in meta)
        self.assertFalse('description' in meta)
        self.assertTrue(nsm)
    def test_get_title_from_rest(self):
        # No explicit title; it should be taken from the reST section
        # header ("Post Title" underlined with dashes).
        file_metadata = [".. slug: write-tests-now\n",
                         ".. date: 2012/09/15 19:52:05\n",
                         ".. tags:\n",
                         ".. link:\n",
                         ".. description:\n",
                         "Post Title\n",
                         "----------\n"]
        opener_mock = mock.mock_open(read_data=file_metadata)
        opener_mock.return_value.readlines.return_value = file_metadata
        post = dummy()
        post.source_path = 'file_with_metadata'
        post.metadata_path = 'file_with_metadata.meta'
        with mock.patch('nikola.post.io.open', opener_mock, create=True):
            meta, nsm = get_meta(post)
        self.assertEqual('Post Title', meta['title'])
        self.assertEqual('write-tests-now', meta['slug'])
        self.assertEqual('2012/09/15 19:52:05', meta['date'])
        self.assertFalse('tags' in meta)
        self.assertFalse('link' in meta)
        self.assertFalse('description' in meta)
        self.assertTrue(nsm)
    def test_get_title_from_fname(self):
        # No title in content or header; the filename passed as the second
        # argument becomes the title.
        file_metadata = [".. slug: write-tests-now\n",
                         ".. date: 2012/09/15 19:52:05\n",
                         ".. tags:\n",
                         ".. link:\n",
                         ".. description:\n"]
        opener_mock = mock.mock_open(read_data=file_metadata)
        opener_mock.return_value.readlines.return_value = file_metadata
        post = dummy()
        post.source_path = 'file_with_metadata'
        post.metadata_path = 'file_with_metadata.meta'
        with mock.patch('nikola.post.io.open', opener_mock, create=True):
            meta, nsm = get_meta(post, 'file_with_metadata')
        self.assertEqual('file_with_metadata', meta['title'])
        self.assertEqual('write-tests-now', meta['slug'])
        self.assertEqual('2012/09/15 19:52:05', meta['date'])
        self.assertFalse('tags' in meta)
        self.assertFalse('link' in meta)
        self.assertFalse('description' in meta)
        self.assertTrue(nsm)
    def test_use_filename_as_slug_fallback(self):
        # No slug in content; the filename should be slugified
        # ('Slugify this' -> 'slugify-this') and used as the slug.
        file_metadata = [".. title: Nikola needs more tests!\n",
                         ".. date: 2012/09/15 19:52:05\n",
                         ".. tags:\n",
                         ".. link:\n",
                         ".. description:\n",
                         "Post content\n"]
        opener_mock = mock.mock_open(read_data=file_metadata)
        opener_mock.return_value.readlines.return_value = file_metadata
        post = dummy()
        post.source_path = 'Slugify this'
        post.metadata_path = 'Slugify this.meta'
        with mock.patch('nikola.post.io.open', opener_mock, create=True):
            meta, nsm = get_meta(post, 'Slugify this')
        self.assertEqual('Nikola needs more tests!', meta['title'])
        self.assertEqual('slugify-this', meta['slug'])
        self.assertEqual('2012/09/15 19:52:05', meta['date'])
        self.assertFalse('tags' in meta)
        self.assertFalse('link' in meta)
        self.assertFalse('description' in meta)
        self.assertTrue(nsm)
    def test_extracting_metadata_from_filename(self):
        # Metadata parsed from the filename via the named groups of the
        # pattern passed as the second argument.
        # NOTE(review): the pattern is a non-raw string containing \d --
        # works, but would be cleaner as a raw string literal.
        post = dummy()
        post.source_path = '2013-01-23-the_slug-dubdubtitle.md'
        post.metadata_path = '2013-01-23-the_slug-dubdubtitle.meta'
        with mock.patch('nikola.post.io.open', create=True):
            meta, _ = get_meta(
                post,
                '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md')
        self.assertEqual('dubdubtitle', meta['title'])
        self.assertEqual('the_slug', meta['slug'])
        self.assertEqual('2013-01-23', meta['date'])
    def test_get_meta_slug_only_from_filename(self):
        # With no pattern, only the slug is derived from the file name.
        post = dummy()
        post.source_path = 'some/path/the_slug.md'
        post.metadata_path = 'some/path/the_slug.meta'
        with mock.patch('nikola.post.io.open', create=True):
            meta, _ = get_meta(post)
        self.assertEqual('the_slug', meta['slug'])
class HeaderDemotionTest(unittest.TestCase):
    """Tests for demote_headers(), which shifts HTML header levels.

    Bug fix: the original methods were named ``demote_by_*`` without the
    ``test_`` prefix, so the unittest runner silently skipped all of
    them; they are renamed so they actually execute.  The deprecated
    ``assertEquals`` alias is replaced by ``assertEqual``, and the
    identical input fixture is shared via a class constant.
    """

    # All tests demote the same six-header document.
    INPUT_STR = '''\
<h1>header 1</h1>
<h2>header 2</h2>
<h3>header 3</h3>
<h4>header 4</h4>
<h5>header 5</h5>
<h6>header 6</h6>
'''

    def _assert_demotion(self, expected_output, level):
        """Demote the fixture by *level* and compare serialized HTML."""
        doc = lxml.html.fromstring(self.INPUT_STR)
        outdoc = lxml.html.fromstring(expected_output)
        demote_headers(doc, level)
        self.assertEqual(lxml.html.tostring(outdoc), lxml.html.tostring(doc))

    def test_demote_by_zero(self):
        """Demoting by 0 leaves every header level unchanged."""
        self._assert_demotion('''\
<h1>header 1</h1>
<h2>header 2</h2>
<h3>header 3</h3>
<h4>header 4</h4>
<h5>header 5</h5>
<h6>header 6</h6>
''', 0)

    def test_demote_by_one(self):
        """Demoting by 1 shifts h1->h2 ... h5->h6; h6 is clamped at h6."""
        self._assert_demotion('''\
<h2>header 1</h2>
<h3>header 2</h3>
<h4>header 3</h4>
<h5>header 4</h5>
<h6>header 5</h6>
<h6>header 6</h6>
''', 1)

    def test_demote_by_two(self):
        """Demoting by 2 shifts h1->h3 ...; h5 and h6 clamp at h6."""
        self._assert_demotion('''\
<h3>header 1</h3>
<h4>header 2</h4>
<h5>header 3</h5>
<h6>header 4</h6>
<h6>header 5</h6>
<h6>header 6</h6>
''', 2)

    def test_demote_by_minus_one(self):
        """Demoting by -1 promotes headers; h1 is clamped at h1."""
        self._assert_demotion('''\
<h1>header 1</h1>
<h1>header 2</h1>
<h2>header 3</h2>
<h3>header 4</h3>
<h4>header 5</h4>
<h5>header 6</h5>
''', -1)
class TranslatableSettingsTest(unittest.TestCase):
    """Tests for translatable settings (TranslatableSetting)."""

    def test_string_input(self):
        """A plain-string setting yields the same value for every language."""
        inp = 'Fancy Blog'
        S = TranslatableSetting('S', inp, {'xx': ''})
        S.default_lang = 'xx'
        S.lang = 'xx'

        # Python 2 has unicode(); on Python 3 fall back to str().
        try:
            u = unicode(S)
        except NameError:  # Python 3
            u = str(S)
        cn = S()       # no language specified
        cr = S('xx')   # real language specified
        cf = S('zz')   # fake language specified

        self.assertEqual(inp, u)
        self.assertEqual(inp, cn)
        self.assertEqual(inp, cr)
        self.assertEqual(inp, cf)
        self.assertEqual(S.lang, 'xx')
        self.assertEqual(S.default_lang, 'xx')

    def test_dict_input(self):
        """A dict setting resolves per language; unknown languages fall
        back to the default language."""
        inp = {'xx': 'Fancy Blog',
               'zz': 'Schmancy Blog'}
        S = TranslatableSetting('S', inp, {'xx': '', 'zz': ''})
        S.default_lang = 'xx'
        S.lang = 'xx'

        # Python 2 has unicode(); on Python 3 fall back to str().
        try:
            u = unicode(S)
        except NameError:  # Python 3
            u = str(S)
        cn = S()
        cx = S('xx')
        cz = S('zz')
        cf = S('ff')  # unknown language

        self.assertEqual(inp['xx'], u)
        self.assertEqual(inp['xx'], cn)
        self.assertEqual(inp['xx'], cx)
        self.assertEqual(inp['zz'], cz)
        # Unknown language falls back to the default ('xx').
        self.assertEqual(inp['xx'], cf)

    def test_dict_input_lang(self):
        """Test dict input, with a language change along the way."""
        inp = {'xx': 'Fancy Blog',
               'zz': 'Schmancy Blog'}
        S = TranslatableSetting('S', inp, {'xx': '', 'zz': ''})
        S.default_lang = 'xx'
        S.lang = 'xx'

        try:
            u = unicode(S)
        except NameError:  # Python 3
            u = str(S)
        cn = S()
        self.assertEqual(inp['xx'], u)
        self.assertEqual(inp['xx'], cn)

        # Change the language.
        # WARNING: DO NOT set lang locally in real code! Set it globally
        # instead! (TranslatableSetting.lang = ...)
        # WARNING: TranslatableSetting.lang is used to override the current
        # locale settings returned by LocaleBorg! Use with care!
        S.lang = 'zz'
        try:
            u = unicode(S)
        except NameError:  # Python 3
            u = str(S)
        cn = S()
        # Both str() and the no-argument call now resolve to 'zz'.
        self.assertEqual(inp['zz'], u)
        self.assertEqual(inp['zz'], cn)
# Allow running this test module directly (python test_utils.py).
if __name__ == '__main__':
    unittest.main()
| JohnTroony/nikola | tests/test_utils.py | Python | mit | 10,555 |
from functools import wraps
import bottle
def requires_session(fun):
    """Decorator: inject the Beaker session into the view as ``sesh``."""
    @wraps(fun)
    def wrapper(*args, **kwargs):
        kwargs['sesh'] = bottle.request.environ.get('beaker.session')
        return fun(*args, **kwargs)
    return wrapper
def requires_login(fun):
    """Decorator: abort with HTTP 401 unless a logged-in user is present.

    A user counts as logged in when the Beaker session holds a ``user``
    dict carrying an ``id``.
    """
    @wraps(fun)
    def wrapper(*args, **kwargs):
        sesh = bottle.request.environ.get('beaker.session')
        # Guard against a missing session (Beaker middleware not
        # configured) as well as an anonymous one; the original raised
        # TypeError on ``'user' in None``.
        if sesh and 'user' in sesh and 'id' in sesh['user']:
            return fun(*args, **kwargs)
        bottle.abort(401, 'User must be logged in.')
    return wrapper
def requires_site_admin(fun):
    """Decorator: abort with HTTP 401 unless the session user is a site
    admin (``user['site_admin'] is True``)."""
    @wraps(fun)
    def wrapper(*args, **kwargs):
        sesh = bottle.request.environ.get('beaker.session')
        # Guard against a missing session and a missing 'site_admin' key;
        # the original raised TypeError/KeyError in those cases instead
        # of rejecting the request.
        if sesh and 'user' in sesh and sesh['user'].get('site_admin') is True:
            return fun(*args, **kwargs)
        bottle.abort(401, 'Site admin privileges required.')
    return wrapper
'''
Created on 2013-10-8
@author: xsank
'''
import sys
from structure import transaction
def finish(msg,exitCode):
    # Report the failure message only when the hook is rejecting the
    # commit (non-zero exit), then terminate with that status code so
    # Subversion can act on it.  (Python 2 print statement.)
    if exitCode!=0:
        print msg
    sys.exit(exitCode)
if __name__=='__main__':
    # Subversion hooks are invoked as: <hook> REPOS_PATH TXN_NAME
    repos=sys.argv[1]
    txn=sys.argv[2]
    trans=transaction.Transaction(repos,txn)
    # check() returns a (message, exit-code) pair; non-zero rejects.
    msg,exitCode=trans.check()
    trans.cleanup()
    finish(msg,exitCode)
| xsank/SVNchecker | main.py | Python | mit | 374 |
# $Id: tfont.py,v 1.2 2003/09/14 04:31:39 riq Exp $
#
# Tenes Empanadas Graciela
# Copyright 2000,2003 Ricardo Quesada (riq@coresecurity.com)
#
import pygame
# pygame.font must be initialised before any SysFont() call below.
if not pygame.font.get_init():
    pygame.font.init()
# Shared font cache keyed by "<family> <size>[b]"; a trailing 'b' marks
# the bold variant (third SysFont argument = 1).
TFont = {
    'helvetica 8' : pygame.font.SysFont('helvetica',8),
    'helvetica 10' : pygame.font.SysFont('helvetica',10),
    'helvetica 12' : pygame.font.SysFont('helvetica',12),
    'helvetica 16' : pygame.font.SysFont('helvetica',16,0),
    'helvetica 16b' : pygame.font.SysFont('helvetica',16,1),
    'helvetica 20' : pygame.font.SysFont('helvetica',20,0),
    'helvetica 20b' : pygame.font.SysFont('helvetica',20,1)
}
| JeroenDeDauw/teg | python/client/gui/tfont.py | Python | gpl-3.0 | 655 |
from bibliopixel.animation.matrix import Matrix
from bibliopixel.colors import COLORS
from bibliopixel.layout import font
class ScrollText(Matrix):
    """Scroll a text string right-to-left across the matrix, wrapping
    back to the right edge once it has fully scrolled off."""

    COLOR_DEFAULTS = (('bgcolor', COLORS.Off), ('color', COLORS.White))

    def __init__(self, layout, text='ScrollText', xPos=0, yPos=0,
                 font_name=font.default_font, font_scale=1, **kwds):
        super().__init__(layout, **kwds)
        self._text = text
        self.xPos = xPos
        self.orig_xPos = xPos
        self.yPos = yPos
        self.font_name = font_name
        self.font_scale = font_scale
        # Pixel width of the rendered text in this font/scale.
        self._strW = font.str_dim(text, font_name, font_scale, True)[0]

    def pre_run(self):
        # Restart the scroll from the configured origin on every run.
        self.xPos = self.orig_xPos

    def step(self, amt=1):
        self.layout.all_off()
        background = self.palette(0)
        foreground = self.palette(1)
        self.layout.drawText(self._text, self.xPos, self.yPos,
                             color=foreground, bg=background,
                             font=self.font_name,
                             font_scale=self.font_scale)
        self.xPos -= amt
        # Once the text has scrolled entirely off the left edge, wrap to
        # the right edge and mark one full pass complete.
        if self.xPos + self._strW <= 0:
            self.xPos = self.width - 1
            self.animComplete = True
class BounceText(Matrix):
    # Bounce a text string back and forth between the matrix edges.
    COLOR_DEFAULTS = (('bgcolor', COLORS.Off), ('color', COLORS.White))

    def __init__(self, layout, text='BounceText', xPos=0, yPos=0, buffer=0,
                 font_name=font.default_font, font_scale=1, **kwds):
        """*buffer* is the margin (in pixels) kept between the text and
        the edge at which the direction reverses."""
        super().__init__(layout, **kwds)
        self._text = text
        self.xPos = xPos
        self.yPos = yPos
        self.font_name = font_name
        self.font_scale = font_scale
        # Pixel width of the rendered text in this font/scale.
        self._strW = font.str_dim(text, font_name, font_scale, True)[0]
        # Current travel direction: -1 = leftwards, 1 = rightwards.
        self._dir = -1
        self._buffer = buffer

    def step(self, amt=1):
        self.layout.all_off()
        self.layout.drawText(self._text, self.xPos, self.yPos,
                             color=self.color, bg=self.bgcolor,
                             font=self.font_name, font_scale=self.font_scale)
        if self._strW < self.width:
            # Text narrower than the display: bounce its edges off the
            # display edges (inset by the buffer).
            if self.xPos <= 0 + self._buffer and self._dir == -1:
                self._dir = 1
            elif self.xPos + self._strW > self.width - self._buffer and self._dir == 1:
                self._dir = -1
                self.animComplete = True
        else:
            # Text wider than the display: scroll until the hidden end
            # becomes visible, then reverse direction.
            if self.xPos + self._strW <= self.width - self._buffer and self._dir == -1:
                self._dir = 1
            elif self.xPos >= 0 + self._buffer and self._dir == 1:
                self._dir = -1
                self.animComplete = True
        self.xPos += amt * self._dir
| ManiacalLabs/BiblioPixelAnimations | BiblioPixelAnimations/matrix/Text.py | Python | mit | 2,583 |
import unittest
from hamcrest import assert_that, equal_to
from mock import MagicMock, call
from mac_os_scripts.add_computer_to_group import ComputerToGroupAdder
from mac_os_scripts_tests.test_common import _NO_OUTPUT
# Expected LDIF document produced by build_register_ldif(): adds the
# computer's DN as a ``member`` attribute of the target group's DN.
_TMP_REGISTER_LDIF = """dn: CN=Developers,OU=Users,OU=Groups,OU=Some Place,DC=some,DC=domain,DC=com
changetype: modify
add: member
member: CN=some_hostname,OU=macOS,OU=Computers,OU=Some Place,DC=some,DC=domain,DC=com
"""
class ComputerToGroupAdderTest(unittest.TestCase):
    """Unit tests for ComputerToGroupAdder with all side-effecting
    helpers (shell commands, filesystem, hostname lookup) mocked out."""

    def setUp(self):
        self._subject = ComputerToGroupAdder(
            sudo_password='Password1',
        )
        # Stub out every external interaction so the tests stay hermetic.
        self._subject.run_command = MagicMock()
        self._subject.read_file = MagicMock()
        self._subject.write_file = MagicMock()
        self._subject.get_hostname = MagicMock()

    def test_build_register_ldif(self):
        """The LDIF body must be written to the fixed temp path."""
        self._subject.run_command.return_value = _NO_OUTPUT
        assert_that(
            self._subject.build_register_ldif(
                source_ou_path='CN=some_hostname,OU=macOS,OU=Computers,OU=Some Place,DC=some,DC=domain,DC=com',
                destination_ou_path='CN=Developers,OU=Users,OU=Groups,OU=Some Place,DC=some,DC=domain,DC=com',
            ),
            equal_to(True)
        )
        assert_that(
            self._subject.write_file.mock_calls,
            equal_to([
                call('/tmp/register.ldif', _TMP_REGISTER_LDIF)
            ])
        )

    def test_add_computer_to_group(self):
        """ldapmodify must be invoked with the prepared LDIF and the
        (shell-escaped) domain credentials."""
        self._subject.run_command.return_value = _NO_OUTPUT
        self._subject.get_hostname.return_value = 'SomeHostname'
        assert_that(
            self._subject.add_computer_to_group(
                fqdn='some.domain.com',
                domain_username='some.admin',
                domain_password='P\@\$\$w0rd123\!\@\#'
            ),
            equal_to(True)
        )
        assert_that(
            self._subject.run_command.mock_calls,
            equal_to([
                call(
                    command_line='/usr/bin/ldapmodify -H ldap://some.domain.com -f /tmp/register.ldif -D some.admin -w P\\@\\$\\$w0rd123\\!\\@\\# -x -c -v',
                    quiet=True, sudo_password_override=False, timeout=None, send_lines=None
                )
            ])
        )

    def test_run_pass(self):
        """run() succeeds when both build and add steps succeed."""
        self._subject.build_register_ldif = MagicMock()
        self._subject.build_register_ldif.return_value = True
        self._subject.add_computer_to_group = MagicMock()
        self._subject.add_computer_to_group.return_value = True
        assert_that(
            self._subject.run(
                source_ou_path='CN=some_hostname,OU=macOS,OU=Computers,OU=Some Place,DC=some,DC=domain,DC=com',
                destination_ou_path='CN=Developers,OU=Users,OU=Groups,OU=Some Place,DC=some,DC=domain,DC=com',
                fqdn='some.domain.com',
                domain_username='some.admin',
                domain_password='P\@\$\$w0rd123\!\@\#'
            ),
            equal_to(True)
        )
| initialed85/mac_os_scripts | mac_os_scripts_tests/add_computer_to_group_test.py | Python | mit | 3,008 |
"""
Renderers are used to serialize a response into specific media types.
They give us a generic way of being able to handle various media types
on the response, such as JSON encoded data or HTML output.
REST framework also provides an HTML renderer the renders the browsable API.
"""
import copy
import string
from django import forms
from django.http.multipartparser import parse_header
from django.template import RequestContext, loader, Template
from django.utils import simplejson as json
from rest_framework.compat import yaml
from rest_framework.exceptions import ConfigurationError
from rest_framework.settings import api_settings
from rest_framework.request import clone_request
from rest_framework.utils import dict2xml
from rest_framework.utils import encoders
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework import VERSION, status
from rest_framework import serializers, parsers
class BaseRenderer(object):
    """
    All renderers should extend this class, setting the `media_type`
    and `format` attributes, and override the `.render()` method.
    """

    media_type = None
    format = None

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Bug fix: the original did ``raise NotImplemented(...)`` --
        # NotImplemented is a constant, not an exception type, so calling
        # it produced "TypeError: 'NotImplementedType' object is not
        # callable" instead of the intended error.
        raise NotImplementedError(
            'Renderer class requires .render() to be implemented')
class JSONRenderer(BaseRenderer):
    """
    Renderer which serializes to json.
    """

    media_type = 'application/json'
    format = 'json'
    encoder_class = encoders.JSONEncoder

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render `obj` into json.
        """
        if data is None:
            return ''

        # Pretty-printing: an 'indent' entry in the renderer context
        # (e.g. set by the BrowseableAPIRenderer) supplies a default that
        # a media-type parameter such as 'application/json; indent=4'
        # may override.
        context = renderer_context or {}
        indent = context.get('indent', None)

        if accepted_media_type:
            base_media_type, params = parse_header(accepted_media_type)
            indent = params.get('indent', indent)
            try:
                # Clamp the requested indent into the range 0..8.
                indent = max(min(int(indent), 8), 0)
            except (ValueError, TypeError):
                indent = None

        return json.dumps(data, cls=self.encoder_class, indent=indent)
class JSONPRenderer(JSONRenderer):
    """
    Renderer which serializes to json,
    wrapping the json output in a callback function.
    """

    media_type = 'application/javascript'
    format = 'jsonp'
    callback_parameter = 'callback'
    default_callback = 'callback'

    def get_callback(self, renderer_context):
        """
        Determine the name of the callback to wrap around the json output.
        """
        request = renderer_context.get('request', None)
        query_params = request and request.GET or {}
        return query_params.get(self.callback_parameter,
                                self.default_callback)

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders into jsonp, wrapping the json output in a callback function.

        Clients may set the callback function name using a query parameter
        on the URL, for example: ?callback=exampleCallbackName
        """
        context = renderer_context or {}
        callback = self.get_callback(context)
        body = super(JSONPRenderer, self).render(data, accepted_media_type,
                                                 context)
        return u"%s(%s);" % (callback, body)
class XMLRenderer(BaseRenderer):
    """
    Renderer which serializes to XML.
    """

    media_type = 'application/xml'
    format = 'xml'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders *obj* into serialized XML.
        """
        # An empty body is rendered for ``None`` (e.g. 204 responses).
        return '' if data is None else dict2xml(data)
class YAMLRenderer(BaseRenderer):
    """
    Renderer which serializes to YAML.
    """

    media_type = 'application/yaml'
    format = 'yaml'
    encoder = encoders.SafeDumper

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders *obj* into serialized YAML.
        """
        # An empty body is rendered for ``None`` (e.g. 204 responses).
        if data is None:
            return ''
        return yaml.dump(data, stream=None, Dumper=self.encoder)
class TemplateHTMLRenderer(BaseRenderer):
    """
    An HTML renderer for use with templates.

    The data supplied to the Response object should be a dictionary that will
    be used as context for the template.

    The template name is determined by (in order of preference):

    1. An explicit `.template_name` attribute set on the response.
    2. An explicit `.template_name` attribute set on this class.
    3. The return result of calling `view.get_template_names()`.

    For example:
        data = {'users': User.objects.all()}
        return Response(data, template_name='users.html')

    For pre-rendered HTML, see StaticHTMLRenderer.
    """

    media_type = 'text/html'
    format = 'html'
    template_name = None
    exception_template_names = [
        '%(status_code)s.html',
        'api_exception.html'
    ]

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders data to HTML, using Django's standard template rendering.

        The template name is determined by (in order of preference):

        1. An explicit .template_name set on the response.
        2. An explicit .template_name set on this class.
        3. The return result of calling view.get_template_names().
        """
        renderer_context = renderer_context or {}
        view = renderer_context['view']
        request = renderer_context['request']
        response = renderer_context['response']

        if response.exception:
            template = self.get_exception_template(response)
        else:
            template_names = self.get_template_names(response, view)
            template = self.resolve_template(template_names)

        context = self.resolve_context(data, request, response)
        return template.render(context)

    def resolve_template(self, template_names):
        return loader.select_template(template_names)

    def resolve_context(self, data, request, response):
        # Expose the status code so error templates can display it.
        if response.exception:
            data['status_code'] = response.status_code
        return RequestContext(request, data)

    def get_template_names(self, response, view):
        if response.template_name:
            return [response.template_name]
        elif self.template_name:
            return [self.template_name]
        elif hasattr(view, 'get_template_names'):
            return view.get_template_names()
        raise ConfigurationError('Returned a template response with no template_name')

    def get_exception_template(self, response):
        template_names = [name % {'status_code': response.status_code}
                          for name in self.exception_template_names]
        try:
            # Try to find an appropriate error template
            return self.resolve_template(template_names)
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.  On any template-resolution
            # error, fall back to using e.g. '404 Not Found'.
            return Template('%d %s' % (response.status_code,
                                       response.status_text.title()))
# Note, subclass TemplateHTMLRenderer simply for the exception behavior
class StaticHTMLRenderer(TemplateHTMLRenderer):
    """
    An HTML renderer class that simply returns pre-rendered HTML.

    The data supplied to the Response object should be a string representing
    the pre-rendered HTML content.

    For example:
        data = '<html><body>example</body></html>'
        return Response(data)

    For template rendered HTML, see TemplateHTMLRenderer.
    """

    media_type = 'text/html'
    format = 'html'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        renderer_context = renderer_context or {}
        response = renderer_context['response']

        # On exceptions, fall back to the template-based error rendering
        # inherited from TemplateHTMLRenderer.
        if response and response.exception:
            request = renderer_context['request']
            template = self.get_exception_template(response)
            context = self.resolve_context(data, request, response)
            return template.render(context)

        # Otherwise the data *is* the body: pass it through untouched.
        return data
class BrowsableAPIRenderer(BaseRenderer):
    """
    HTML renderer used to self-document the API.
    """

    media_type = 'text/html'
    format = 'api'
    template = 'rest_framework/api.html'

    def get_default_renderer(self, view):
        """
        Return an instance of the first valid renderer.
        (Don't use another documenting renderer.)
        """
        renderers = [renderer for renderer in view.renderer_classes
                     if not issubclass(renderer, BrowsableAPIRenderer)]
        if not renderers:
            return None
        return renderers[0]()

    def get_content(self, renderer, data,
                    accepted_media_type, renderer_context):
        """
        Get the content as if it had been rendered by the default
        non-documenting renderer.
        """
        if not renderer:
            return '[No renderers were found]'

        renderer_context['indent'] = 4
        content = renderer.render(data, accepted_media_type, renderer_context)

        if not all(char in string.printable for char in content):
            # Bug fix: the original returned the bare format string,
            # leaving the '%d' placeholder unsubstituted in the output.
            return '[%d bytes of binary content]' % len(content)

        return content

    def show_form_for_method(self, view, method, request, obj):
        """
        Returns True if a form should be shown for this method.
        """
        if method not in view.allowed_methods:
            return  # Not a valid method

        if not api_settings.FORM_METHOD_OVERRIDE:
            return  # Cannot use form overloading

        request = clone_request(request, method)
        try:
            if not view.has_permission(request, obj):
                return  # Don't have permission
        except Exception:
            # Bug fix: was a bare ``except:``.  A permission check that
            # raises is still treated as "no permission", but SystemExit/
            # KeyboardInterrupt are no longer swallowed.
            return  # Don't have permission and exception explicitly raise
        return True

    def serializer_to_form_fields(self, serializer):
        """Map serializer fields to equivalent django.forms fields."""
        field_mapping = {
            serializers.FloatField: forms.FloatField,
            serializers.IntegerField: forms.IntegerField,
            serializers.DateTimeField: forms.DateTimeField,
            serializers.DateField: forms.DateField,
            serializers.EmailField: forms.EmailField,
            serializers.RegexField: forms.RegexField,
            serializers.CharField: forms.CharField,
            serializers.ChoiceField: forms.ChoiceField,
            serializers.BooleanField: forms.BooleanField,
            serializers.PrimaryKeyRelatedField: forms.ChoiceField,
            serializers.ManyPrimaryKeyRelatedField: forms.MultipleChoiceField,
            serializers.SlugRelatedField: forms.ChoiceField,
            serializers.ManySlugRelatedField: forms.MultipleChoiceField,
            serializers.HyperlinkedRelatedField: forms.ChoiceField,
            serializers.ManyHyperlinkedRelatedField: forms.MultipleChoiceField,
            serializers.FileField: forms.FileField,
            serializers.ImageField: forms.ImageField,
        }

        fields = {}
        for k, v in serializer.get_fields().items():
            if getattr(v, 'read_only', True):
                continue

            kwargs = {}
            kwargs['required'] = v.required

            #if getattr(v, 'queryset', None):
            #    kwargs['queryset'] = v.queryset

            if getattr(v, 'choices', None) is not None:
                kwargs['choices'] = v.choices

            if getattr(v, 'widget', None):
                widget = copy.deepcopy(v.widget)
                kwargs['widget'] = widget

            if getattr(v, 'default', None) is not None:
                kwargs['initial'] = v.default

            kwargs['label'] = k

            try:
                fields[k] = field_mapping[v.__class__](**kwargs)
            except KeyError:
                # Unmapped field types degrade to a choice/text input.
                if getattr(v, 'choices', None) is not None:
                    fields[k] = forms.ChoiceField(**kwargs)
                else:
                    fields[k] = forms.CharField(**kwargs)
        return fields

    def get_form(self, view, method, request):
        """
        Get a form, possibly bound to either the input or output data.
        In the absence on of the Resource having an associated form then
        provide a form that can be used to submit arbitrary content.
        """
        obj = getattr(view, 'object', None)
        if not self.show_form_for_method(view, method, request, obj):
            return

        if method == 'DELETE' or method == 'OPTIONS':
            return True  # Don't actually need to return a form

        if not getattr(view, 'get_serializer', None) or parsers.FormParser not in view.parser_classes:
            media_types = [parser.media_type for parser in view.parser_classes]
            return self.get_generic_content_form(media_types)

        serializer = view.get_serializer(instance=obj)
        fields = self.serializer_to_form_fields(serializer)

        # Creating an on the fly form see:
        # http://stackoverflow.com/questions/3915024/dynamically-creating-classes-python
        OnTheFlyForm = type("OnTheFlyForm", (forms.Form,), fields)
        data = (obj is not None) and serializer.data or None
        form_instance = OnTheFlyForm(data)
        return form_instance

    def get_generic_content_form(self, media_types):
        """
        Returns a form that allows for arbitrary content types to be tunneled
        via standard HTML forms.
        (Which are typically application/x-www-form-urlencoded)
        """
        # If we're not using content overloading there's no point in supplying a generic form,
        # as the view won't treat the form's value as the content of the request.
        if not (api_settings.FORM_CONTENT_OVERRIDE
                and api_settings.FORM_CONTENTTYPE_OVERRIDE):
            return None

        content_type_field = api_settings.FORM_CONTENTTYPE_OVERRIDE
        content_field = api_settings.FORM_CONTENT_OVERRIDE
        choices = [(media_type, media_type) for media_type in media_types]
        initial = media_types[0]

        # NB. http://jacobian.org/writing/dynamic-form-generation/
        class GenericContentForm(forms.Form):
            def __init__(self):
                super(GenericContentForm, self).__init__()

                self.fields[content_type_field] = forms.ChoiceField(
                    label='Content Type',
                    choices=choices,
                    initial=initial
                )
                self.fields[content_field] = forms.CharField(
                    label='Content',
                    widget=forms.Textarea
                )

        return GenericContentForm()

    def get_name(self, view):
        try:
            return view.get_name()
        except AttributeError:
            # NOTE(review): falls back to the docstring, same as
            # get_description() below -- possibly a copy-paste; confirm
            # the intended name fallback before changing.
            return view.__doc__

    def get_description(self, view):
        try:
            return view.get_description(html=True)
        except AttributeError:
            return view.__doc__

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders *obj* using the :attr:`template` set on the class.

        The context used in the template contains all the information
        needed to self-document the response to this request.
        """
        accepted_media_type = accepted_media_type or ''
        renderer_context = renderer_context or {}

        view = renderer_context['view']
        request = renderer_context['request']
        response = renderer_context['response']

        renderer = self.get_default_renderer(view)
        content = self.get_content(renderer, data, accepted_media_type, renderer_context)

        put_form = self.get_form(view, 'PUT', request)
        post_form = self.get_form(view, 'POST', request)
        delete_form = self.get_form(view, 'DELETE', request)
        options_form = self.get_form(view, 'OPTIONS', request)

        name = self.get_name(view)
        description = self.get_description(view)
        breadcrumb_list = get_breadcrumbs(request.path)

        template = loader.get_template(self.template)
        context = RequestContext(request, {
            'content': content,
            'view': view,
            'request': request,
            'response': response,
            'description': description,
            'name': name,
            'version': VERSION,
            'breadcrumblist': breadcrumb_list,
            'allowed_methods': view.allowed_methods,
            'available_formats': [renderer.format for renderer in view.renderer_classes],
            'put_form': put_form,
            'post_form': post_form,
            'delete_form': delete_form,
            'options_form': options_form,
            'api_settings': api_settings
        })

        ret = template.render(context)

        # Munge DELETE Response code to allow us to return content
        # (Do this *after* we've rendered the template so that we include
        # the normal deletion response code in the output)
        if response.status_code == status.HTTP_204_NO_CONTENT:
            response.status_code = status.HTTP_200_OK

        return ret
| cloudcopy/seahub | thirdpart/rest_framework/renderers.py | Python | apache-2.0 | 17,423 |
import ast
import pkg_resources
import unittest
from rgkit.gamestate import GameState
from rgkit.settings import settings
# Load the default board map shipped with rgkit and initialise the
# global settings with it before any GameState is constructed.
map_data = ast.literal_eval(
    open(pkg_resources.resource_filename('rgkit', 'maps/default.py')).read())
settings.init_map(map_data)
class TestStateMisc(unittest.TestCase):
    """Miscellaneous GameState behaviour: adding/removing robots, robot-id
    uniqueness, scoring, and the per-player game-info snapshot.

    Deprecated assertEquals/assertNotEquals aliases replaced with
    assertEqual/assertNotEqual, consistent with the rest of the module.
    """

    def test_add_robot(self):
        state = GameState()
        state.add_robot((9, 9), 0, robot_id=7, hp=42)
        self.assertTrue((9, 9) in state.robots)
        self.assertEqual(state.robots[(9, 9)].player_id, 0)
        self.assertEqual(state.robots[(9, 9)].robot_id, 7)
        self.assertEqual(state.robots[(9, 9)].hp, 42)

    def test_robot_ids_are_unique(self):
        state = GameState()
        state.add_robot((9, 9), 0)
        state.add_robot((8, 8), 0)
        self.assertNotEqual(state.robots[(9, 9)].robot_id,
                            state.robots[(8, 8)].robot_id)

    def test_is_robot(self):
        state = GameState()
        state.add_robot((9, 9), 0)
        self.assertTrue(state.is_robot((9, 9)))
        self.assertFalse(state.is_robot((8, 8)))

    def test_remove_robot(self):
        state = GameState()
        state.add_robot((9, 9), 0)
        state.remove_robot((9, 9))
        self.assertFalse(state.is_robot((9, 9)))

    def test_scores(self):
        # Scores are simply the robot counts per player.
        state = GameState()
        state.add_robot((9, 9), 0)
        state.add_robot((6, 11), 1)
        state.add_robot((8, 14), 0)
        self.assertEqual(state.get_scores(), [2, 1])

    def test_get_game_info(self):
        state = GameState()
        state.add_robot((9, 9), 0)
        state.add_robot((6, 11), 1)
        game_info = state.get_game_info(0)
        self.assertEqual(game_info.robots[9, 9].location, (9, 9))
        self.assertEqual(game_info.robots[9, 9].hp, state.robots[(9, 9)].hp)
        self.assertEqual(game_info.robots[9, 9].player_id,
                         state.robots[(9, 9)].player_id)
        self.assertEqual(game_info.robots[9, 9].robot_id,
                         state.robots[(9, 9)].robot_id)
        self.assertEqual(game_info.robots[6, 11].location, (6, 11))
        self.assertEqual(game_info.robots[6, 11].hp, state.robots[(6, 11)].hp)
        self.assertEqual(game_info.robots[6, 11].player_id,
                         state.robots[(6, 11)].player_id)
        # Enemy robots must not expose their robot_id to the other player.
        self.assertRaises(AttributeError,
                          lambda: game_info.robots[6, 11].robot_id)
        self.assertEqual(game_info.turn, state.turn)
| RobotGame/rgkit | test/state_misc_test.py | Python | unlicense | 2,473 |
from distutils.core import setup
import py2exe
"""
After adding py2exe to your Python distribution (using easy_install with the installer executable for your specific version of Python), build with:
python setup.py py2exe
The executable file is placed in /dist/route_test.exe
See more information about py2exe here:
http://www.py2exe.org/
http://www.py2exe.org/index.cgi/Tutorial
"""
setup(console=['route_test.py']) | turtlemonvh/traffic-monitor | setup.py | Python | mit | 420 |
"""add the user_policy table
Revision ID: ca69b099820
Revises: 3beeaef02651
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import Column
# revision identifiers, used by Alembic.
revision = 'ca69b099820'
down_revision = '3beeaef02651'
def upgrade():
    # User <-> policy association table.  The composite primary key
    # (user_uuid, policy_uuid) makes each association unique, and the
    # ON DELETE CASCADE foreign keys remove rows automatically when
    # either the user or the policy is deleted.
    op.create_table(
        'auth_user_policy',
        Column(
            'user_uuid',
            sa.String(38),
            sa.ForeignKey('auth_user.uuid', ondelete='CASCADE'),
            primary_key=True,
        ),
        Column(
            'policy_uuid',
            sa.String(38),
            sa.ForeignKey('auth_policy.uuid', ondelete='CASCADE'),
            primary_key=True,
        ),
    )
def downgrade():
    # Bug fix: the original dropped 'auth_user_email', a table this
    # migration never created.  downgrade() must undo upgrade() by
    # dropping the table created above.
    op.drop_table('auth_user_policy')
| wazo-pbx/xivo-auth | alembic/versions/ca69b099820_add_the_user_policy_table.py | Python | gpl-3.0 | 745 |
from __future__ import unicode_literals
import copy
import datetime
import random
import re
import string
from moto.core.utils import (
camelcase_to_underscores,
iso_8601_datetime_with_milliseconds,
)
import six
def random_id(size=13):
    """Return a random identifier of *size* characters drawn from the
    digits 0-9 and the uppercase ASCII letters."""
    alphabet = list(range(10)) + list(string.ascii_uppercase)
    return "".join(six.text_type(random.choice(alphabet)) for _ in range(size))
def random_cluster_id(size=13):
    """Return a random EMR cluster id such as ``j-ABC123DEF456G``.

    Bug fix: *size* was previously accepted but ignored; it is now
    forwarded to random_id() (the default of 13 keeps prior behaviour).
    """
    return "j-{0}".format(random_id(size))
def random_step_id(size=13):
    """Return a random EMR step id such as ``s-ABC123DEF456G``.

    Bug fix: *size* was previously accepted but ignored; it is now
    forwarded to random_id() (the default of 13 keeps prior behaviour).
    """
    return "s-{0}".format(random_id(size))
def random_instance_group_id(size=13):
    """Return a random EMR instance-group id such as ``i-ABC123DEF456G``.

    Bug fix: *size* was previously accepted but ignored; it is now
    forwarded to random_id() (the default of 13 keeps prior behaviour).
    """
    return "i-{0}".format(random_id(size))
def steps_from_query_string(querystring_dict):
    """Normalise flattened EMR step parameters into step dicts.

    Each incoming step dict is modified in place:

    * ``hadoop_jar_step._jar``            -> ``jar``
    * ``properties`` ([{Key, Value}, ..]) -> plain dict
    * ``hadoop_jar_step._args.member.N``  -> ``args`` list (1-based N)
    """
    steps = []
    for step in querystring_dict:
        step["jar"] = step.pop("hadoop_jar_step._jar")
        step["properties"] = dict(
            (prop["Key"], prop["Value"]) for prop in step.get("properties", [])
        )
        collected_args = []
        index = 1
        keyfmt = "hadoop_jar_step._args.member.{0}"
        # Consume consecutive 1-based arg members until the first gap.
        while keyfmt.format(index) in step:
            collected_args.append(step.pop(keyfmt.format(index)))
            index += 1
        step["args"] = collected_args
        steps.append(step)
    return steps
class Unflattener:
    @staticmethod
    def unflatten_complex_params(input_dict, param_name):
        """Function to unflatten (portions of) dicts with complex keys.  The moto request parser flattens the incoming
        request bodies, which is generally helpful, but for nested dicts/lists can result in a hard-to-manage
        parameter explosion.  This function allows one to selectively unflatten a set of dict keys, replacing them
        with a deep dict/list structure named identically to the root component in the complex name.

        Complex keys are composed of multiple components separated by periods.  Components may be prefixed with _,
        which is stripped.  List indexes are represented with two components, 'member' and the (1-based) index number.
        """
        # Collect the flattened keys that belong to param_name, remove
        # them from the input, then rebuild each one as a nested
        # structure rooted directly in input_dict.
        items_to_process = {}
        for k in input_dict.keys():
            if k.startswith(param_name):
                items_to_process[k] = input_dict[k]
        if len(items_to_process) == 0:
            return
        for k in items_to_process.keys():
            del input_dict[k]
        for k in items_to_process.keys():
            Unflattener._set_deep(k, input_dict, items_to_process[k])

    @staticmethod
    def _set_deep(complex_key, container, value):
        # Walk/create the nested containers named by complex_key and
        # store value at the leaf.  A 'member' component marks the next
        # component as a 1-based list index.
        keys = complex_key.split(".")
        keys.reverse()  # pop() then yields components left-to-right

        while len(keys) > 0:
            if len(keys) == 1:
                # Last component: assign the value itself.
                key = keys.pop().strip("_")
                Unflattener._add_to_container(container, key, value)
            else:
                key = keys.pop().strip("_")
                if keys[-1] == "member":
                    # '<key>.member.<idx>' denotes a list element;
                    # discard the 'member' marker and descend into (or
                    # create) the list stored under <key>.
                    keys.pop()
                    if not Unflattener._key_in_container(container, key):
                        container = Unflattener._add_to_container(container, key, [])
                    else:
                        container = Unflattener._get_child(container, key)
                else:
                    # Plain component: descend into (or create) a dict.
                    if not Unflattener._key_in_container(container, key):
                        container = Unflattener._add_to_container(container, key, {})
                    else:
                        container = Unflattener._get_child(container, key)

    @staticmethod
    def _add_to_container(container, key, value):
        # For lists, key is a 1-based index; pad with None when members
        # arrive out of order.  Returns the stored value so callers can
        # descend into it.
        if type(container) is dict:
            container[key] = value
        elif type(container) is list:
            i = int(key)
            while len(container) < i:
                container.append(None)
            container[i - 1] = value
        return value

    @staticmethod
    def _get_child(container, key):
        # Fetch an existing child; key is a 1-based index for lists.
        if type(container) is dict:
            return container[key]
        elif type(container) is list:
            i = int(key)
            return container[i - 1]

    @staticmethod
    def _key_in_container(container, key):
        # For lists, "present" means the 1-based index is within bounds.
        if type(container) is dict:
            return key in container
        elif type(container) is list:
            i = int(key)
            return len(container) >= i
class CamelToUnderscoresWalker:
    """A class to convert the keys in dict/list hierarchical data structures from CamelCase to snake_case (underscores)"""

    @staticmethod
    def parse(x):
        """Recursively convert the keys of *x* (dict, list or scalar)."""
        if isinstance(x, dict):
            return CamelToUnderscoresWalker.parse_dict(x)
        if isinstance(x, list):
            return CamelToUnderscoresWalker.parse_list(x)
        return CamelToUnderscoresWalker.parse_scalar(x)

    @staticmethod
    def parse_dict(x):
        """Return a new dict with snake_case keys and converted values."""
        return dict(
            (camelcase_to_underscores(key), CamelToUnderscoresWalker.parse(value))
            for key, value in x.items()
        )

    @staticmethod
    def parse_list(x):
        """Return a new list with each element converted recursively."""
        return [CamelToUnderscoresWalker.parse(item) for item in x]

    @staticmethod
    def parse_scalar(x):
        """Scalars pass through unchanged."""
        return x
class ReleaseLabel(object):
    """Parsed EMR release label of the form ``emr-<major>.<minor>.<patch>``.

    Instances compare by their (major, minor, patch) tuple and are hashable
    (fix: the original defined ``__eq__`` without ``__hash__``, which makes
    instances unhashable in Python 3), so they can be sorted, deduplicated
    in sets, or used as dict keys.
    """
    version_re = re.compile(r"^emr-(\d+)\.(\d+)\.(\d+)$")
    def __init__(self, release_label):
        major, minor, patch = self.parse(release_label)
        self.major = major
        self.minor = minor
        self.patch = patch
    @classmethod
    def parse(cls, release_label):
        """Return (major, minor, patch) as ints.

        :raises ValueError: when `release_label` is empty or does not match
            the ``emr-X.Y.Z`` format.
        """
        if not release_label:
            raise ValueError("Invalid empty ReleaseLabel: %r" % release_label)
        match = cls.version_re.match(release_label)
        if not match:
            raise ValueError("Invalid ReleaseLabel: %r" % release_label)
        major, minor, patch = match.groups()
        return int(major), int(minor), int(patch)
    def __str__(self):
        version = "emr-%d.%d.%d" % (self.major, self.minor, self.patch)
        return version
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, str(self))
    def __iter__(self):
        # Enables tuple(self) and unpacking: major, minor, patch = label.
        return iter((self.major, self.minor, self.patch))
    def __hash__(self):
        # Keep hashing consistent with __eq__ (tuple-based comparison).
        return hash(tuple(self))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Tuple comparison, consistent with the other rich comparisons below.
        return tuple(self) == tuple(other)
    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return tuple(self) != tuple(other)
    def __lt__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return tuple(self) < tuple(other)
    def __le__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return tuple(self) <= tuple(other)
    def __gt__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return tuple(self) > tuple(other)
    def __ge__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return tuple(self) >= tuple(other)
class EmrManagedSecurityGroup(object):
    """Base descriptor for an EMR-managed security group; subclasses fill in
    `kind`, `group_name` and `short_name`."""
    class Kind:
        MASTER = "Master"
        SLAVE = "Slave"
        SERVICE = "Service"
    kind = None
    group_name = ""
    short_name = ""
    desc_fmt = "{short_name} for Elastic MapReduce created on {created}"
    @classmethod
    def description(cls):
        """Render the group description with the current timestamp."""
        stamp = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
        return cls.desc_fmt.format(short_name=cls.short_name, created=stamp)
class EmrManagedMasterSecurityGroup(EmrManagedSecurityGroup):
    # Managed group guarding the EMR master node.
    kind = EmrManagedSecurityGroup.Kind.MASTER
    group_name = "ElasticMapReduce-Master-Private"
    short_name = "Master"
class EmrManagedSlaveSecurityGroup(EmrManagedSecurityGroup):
    # Managed group guarding the EMR worker (slave) nodes.
    kind = EmrManagedSecurityGroup.Kind.SLAVE
    group_name = "ElasticMapReduce-Slave-Private"
    short_name = "Slave"
class EmrManagedServiceAccessSecurityGroup(EmrManagedSecurityGroup):
    # Managed group for the EMR service-access channel.
    kind = EmrManagedSecurityGroup.Kind.SERVICE
    group_name = "ElasticMapReduce-ServiceAccess"
    short_name = "Service access"
class EmrSecurityGroupManager(object):
    """Resolves (or creates) the three EMR-managed security groups for a VPC
    and installs the standard ingress/egress rules between them.

    The MANAGED_RULES_* templates hold Kind placeholders in
    'group_name_or_id' and 'source_group_ids'; _render_rules swaps those
    for real group ids at application time.
    """
    MANAGED_RULES_EGRESS = [
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.MASTER,
            "from_port": None,
            "ip_protocol": "-1",
            "ip_ranges": [{"CidrIp": "0.0.0.0/0"}],
            "to_port": None,
            "source_group_ids": [],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.SLAVE,
            "from_port": None,
            "ip_protocol": "-1",
            "ip_ranges": [{"CidrIp": "0.0.0.0/0"}],
            "to_port": None,
            "source_group_ids": [],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.SERVICE,
            "from_port": 8443,
            "ip_protocol": "tcp",
            "ip_ranges": [],
            "to_port": 8443,
            "source_group_ids": [
                EmrManagedSecurityGroup.Kind.MASTER,
                EmrManagedSecurityGroup.Kind.SLAVE,
            ],
        },
    ]
    MANAGED_RULES_INGRESS = [
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.MASTER,
            "from_port": 0,
            "ip_protocol": "tcp",
            "ip_ranges": [],
            "to_port": 65535,
            "source_group_ids": [
                EmrManagedSecurityGroup.Kind.MASTER,
                EmrManagedSecurityGroup.Kind.SLAVE,
            ],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.MASTER,
            "from_port": 8443,
            "ip_protocol": "tcp",
            "ip_ranges": [],
            "to_port": 8443,
            "source_group_ids": [EmrManagedSecurityGroup.Kind.SERVICE],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.MASTER,
            "from_port": 0,
            "ip_protocol": "udp",
            "ip_ranges": [],
            "to_port": 65535,
            "source_group_ids": [
                EmrManagedSecurityGroup.Kind.MASTER,
                EmrManagedSecurityGroup.Kind.SLAVE,
            ],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.MASTER,
            "from_port": -1,
            "ip_protocol": "icmp",
            "ip_ranges": [],
            "to_port": -1,
            "source_group_ids": [
                EmrManagedSecurityGroup.Kind.MASTER,
                EmrManagedSecurityGroup.Kind.SLAVE,
            ],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.SLAVE,
            "from_port": 0,
            "ip_protocol": "tcp",
            "ip_ranges": [],
            "to_port": 65535,
            "source_group_ids": [
                EmrManagedSecurityGroup.Kind.SLAVE,
                EmrManagedSecurityGroup.Kind.MASTER,
            ],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.SLAVE,
            "from_port": 8443,
            "ip_protocol": "tcp",
            "ip_ranges": [],
            "to_port": 8443,
            "source_group_ids": [EmrManagedSecurityGroup.Kind.SERVICE],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.SLAVE,
            "from_port": 0,
            "ip_protocol": "udp",
            "ip_ranges": [],
            "to_port": 65535,
            "source_group_ids": [
                EmrManagedSecurityGroup.Kind.MASTER,
                EmrManagedSecurityGroup.Kind.SLAVE,
            ],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.SLAVE,
            "from_port": -1,
            "ip_protocol": "icmp",
            "ip_ranges": [],
            "to_port": -1,
            "source_group_ids": [
                EmrManagedSecurityGroup.Kind.MASTER,
                EmrManagedSecurityGroup.Kind.SLAVE,
            ],
        },
        {
            "group_name_or_id": EmrManagedSecurityGroup.Kind.SERVICE,
            "from_port": 9443,
            "ip_protocol": "tcp",
            "ip_ranges": [],
            "to_port": 9443,
            "source_group_ids": [EmrManagedSecurityGroup.Kind.MASTER],
        },
    ]
    def __init__(self, ec2_backend, vpc_id):
        self.ec2 = ec2_backend
        self.vpc_id = vpc_id
    def manage_security_groups(
        self, master_security_group, slave_security_group, service_access_security_group
    ):
        """Resolve or create the master/slave/service groups (any argument
        may be a caller-supplied group name/id, or falsy to use the managed
        default), install the managed rules, and return the three groups
        in (master, slave, service) order."""
        group_metadata = [
            (
                master_security_group,
                EmrManagedSecurityGroup.Kind.MASTER,
                EmrManagedMasterSecurityGroup,
            ),
            (
                slave_security_group,
                EmrManagedSecurityGroup.Kind.SLAVE,
                EmrManagedSlaveSecurityGroup,
            ),
            (
                service_access_security_group,
                EmrManagedSecurityGroup.Kind.SERVICE,
                EmrManagedServiceAccessSecurityGroup,
            ),
        ]
        managed_groups = {}
        for name, kind, defaults in group_metadata:
            managed_groups[kind] = self._get_or_create_sg(name, defaults)
        self._add_rules_to(managed_groups)
        return (
            managed_groups[EmrManagedSecurityGroup.Kind.MASTER],
            managed_groups[EmrManagedSecurityGroup.Kind.SLAVE],
            managed_groups[EmrManagedSecurityGroup.Kind.SERVICE],
        )
    def _get_or_create_sg(self, sg_id, defaults):
        """Look up `sg_id` (or the managed default name) in this VPC,
        creating the managed default group when absent.

        :raises ValueError: when a caller-supplied group does not exist.
        """
        find_sg = self.ec2.get_security_group_by_name_or_id
        create_sg = self.ec2.create_security_group
        group_id_or_name = sg_id or defaults.group_name
        group = find_sg(group_id_or_name, self.vpc_id)
        if group is None:
            if group_id_or_name != defaults.group_name:
                # Only the managed defaults may be auto-created.
                raise ValueError(
                    "The security group '{}' does not exist".format(group_id_or_name)
                )
            group = create_sg(defaults.group_name, defaults.description(), self.vpc_id)
        return group
    def _add_rules_to(self, managed_groups):
        """Authorize the managed egress and ingress rules, ignoring ones
        that already exist."""
        # Imported lazily — presumably to avoid a circular import with
        # moto.ec2 (TODO confirm). Fix: hoisted out of the per-rule inner
        # loop, where the original re-ran the import on every iteration.
        from moto.ec2.exceptions import InvalidPermissionDuplicateError
        rules_metadata = [
            (self.MANAGED_RULES_EGRESS, self.ec2.authorize_security_group_egress),
            (self.MANAGED_RULES_INGRESS, self.ec2.authorize_security_group_ingress),
        ]
        for rules, add_rule in rules_metadata:
            rendered_rules = self._render_rules(rules, managed_groups)
            for rule in rendered_rules:
                try:
                    add_rule(vpc_id=self.vpc_id, **rule)
                except InvalidPermissionDuplicateError:
                    # If the rule already exists, we can just move on.
                    pass
    @staticmethod
    def _render_rules(rules, managed_groups):
        """Deep-copy the rule templates and replace every Kind placeholder
        with the corresponding group's id."""
        rendered_rules = copy.deepcopy(rules)
        for rule in rendered_rules:
            rule["group_name_or_id"] = managed_groups[rule["group_name_or_id"]].id
            rule["source_group_ids"] = [
                managed_groups[group].id for group in rule["source_group_ids"]
            ]
        return rendered_rules
| william-richard/moto | moto/emr/utils.py | Python | apache-2.0 | 15,072 |
# Auto-generated test driver: exercises sklearn2sql SQL code generation for
# a GradientBoostingClassifier on the FourClass_100 dataset, targeting SQLite.
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("GradientBoostingClassifier" , "FourClass_100" , "sqlite")
| antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_100/ws_FourClass_100_GradientBoostingClassifier_sqlite_code_gen.py | Python | bsd-3-clause | 155 |
"""artlaasya urls"""
from django.conf.urls import patterns, url, include
from django.contrib import admin
from django.conf import settings
from artlaasya import views
admin.autodiscover()
# Dynamic page URL patterns.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — this module targets an older Django release.
urlpatterns = patterns('',
    url(r'^$', views.home, name="v_home"),
    url(r'^artist/(?P<artist_name>\b[a-z0-9\-]+\b)',
        views.artist,
        name="v_artist"),
    url(r'^artists/(?P<artist_genre>\b[a-z\-]+\b)',
        views.artists,
        name="v_artists-genre"),
    url(r'^artwork/(?P<artist_name>\b[a-z0-9\-]+\b)/(?P<artwork_title>\b[a-z0-9\-\'\(\.\)\!\?]+\b)',
        views.artwork,
        name="v_artwork"),
    url(r'^artworks/(?P<artwork_genre>\b[a-z\-]+\b)',
        views.artworks,
        name="v_artworks-genre"),
    url(r'^event/(?P<event_title>\b[a-z0-9\-\'\(\)\!\?]+\b)',
        views.event,
        name="v_event"),
    url(r'^events/$',
        views.events,
        name="v_events"),
    url(r'^learn/(?P<artwork_genre>\b[a-z\-]+\b)',
        views.learn,
        name="v_learn"),
    # NOTE(review): views.search appears to be a class-based view despite the
    # lowercase name (it is used via .as_view()) — confirm.
    url(r'^search/',
        views.search.as_view(),
        name="search"),
    url(r'^searching/$',
        views.searching,
        name="v_searching"),
)
# Static (flatpage) URL patterns served by django.contrib.flatpages.
urlpatterns += patterns('django.contrib.flatpages.views',
    url(r'^contact/$',
        'flatpage',
        {'url': '/contact/'},
        name='contact'),
    url(r'^termsofuse/$',
        'flatpage',
        {'url': '/termsofuse/'},
        name='termsofuse'),
    url(r'^privacy/$',
        'flatpage',
        {'url': '/privacy/'},
        name='privacy'),
    url(r'^termsofsale/$',
        'flatpage',
        {'url': '/termsofsale/'},
        name='termsofsale'),
)
# Sitemap sections keyed by section name.
# NOTE(review): despite the original "tuple compiler" comment this is a dict;
# the outer parentheses only group the expression.
sitemaps = (
    {
        '_static': views.StaticSitemap,
        '_artists': views.ArtistSitemap,
        '_artworks': views.ArtworkSitemap,
        '_events': views.EventSitemap,
    }
)
# Sitemap and admin page URL patterns.
urlpatterns += patterns('',
    url(r'^admin/',
        include(admin.site.urls)),
    url(r'^sitemap\.xml$',
        'django.contrib.sitemaps.views.sitemap',
        {'sitemaps': sitemaps},
        name='sitemap'),
)
# Debug toolbar URL patterns (development only).
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
#EOF - artlaasya urls
import logging
import constants
import guid
class BucketFull(Exception):
    """Signals that a KBucket already holds its maximum of k contacts."""
class KBucket(object):
    """A Kademlia k-bucket: an ordered list of up to `constants.k` contacts
    covering a contiguous range of the node-ID space.

    Contacts are kept in least-recently-seen-first order: re-adding an
    existing contact moves it to the tail.

    NOTE: this module is Python 2 code (`basestring`, `long`).
    """
    def __init__(self, rangeMin, rangeMax, market_id):
        """
        Initialize a new KBucket with a range and a market_id.
        @param rangeMin: The lower boundary for the range in the ID space
                         covered by this KBucket.
        @type: int
        @param rangeMax: The upper boundary for the range in the ID space
                         covered by this KBucket.
        @type: int
        @param market_id: Identifier used to tag this bucket's logger;
                          presumably the market this node serves — confirm.
        """
        self.lastAccessed = 0
        self.rangeMin = rangeMin
        self.rangeMax = rangeMax
        self.contacts = []
        self.market_id = market_id
        self.log = logging.getLogger(
            '[%s] %s' % (market_id, self.__class__.__name__)
        )
    def __len__(self):
        return len(self.contacts)
    def __iter__(self):
        return iter(self.contacts)
    def addContact(self, contact):
        """
        Add a contact to the contact list.
        The new contact is always appended to the contact list after removing
        any prior occurences of the same contact.
        @param contact: The ID of the contact to add.
        @type contact: guid.GUIDMixin or str or unicode
        @raise node.kbucket.BucketFull: The bucket is full and the contact
                                        to add is not already in it.
        """
        if isinstance(contact, basestring):
            contact = guid.GUIDMixin(contact)
        try:
            # Assume contact exists. Attempt to remove the old one...
            self.contacts.remove(contact)
            # ... and add the new one at the end of the list.
            self.contacts.append(contact)
            # The code above works as follows:
            # Assume C1 is the existing contact and C2 is the new contact.
            # Iff C1 is equal to C2, it will be removed from the list.
            # Since Contact.__eq__ compares only GUIDs, contact C1 will
            # be replaced even if it's not exactly the same as C2.
            # This is the intended behaviour; the fresh contact may have
            # updated add-on data (e.g. optimization-specific stuff).
        except ValueError:
            # The contact wasn't there after all, so add it.
            if len(self.contacts) < constants.k:
                self.contacts.append(contact)
            else:
                raise BucketFull('No space in bucket to insert contact')
    def getContact(self, contactID):
        """
        Return the contact with the specified ID or None if not present.
        @param contactID: The ID to search.
        @type contact: guid.GUIDMixin or str or unicode
        @rtype: guid.GUIDMixin or None
        """
        self.log.debug('[getContact] %s', contactID)
        for contact in self.contacts:
            if contact == contactID:
                self.log.debug('[getContact] Found %s', contact)
                return contact
        self.log.debug('[getContact] No Results')
        return None
    def getContacts(self, count=-1, excludeContact=None):
        """
        Return a list containing up to the first `count` number of contacts.
        @param count: The amount of contacts to return;
                      if 0 or less, return all contacts.
        @type count: int
        @param excludeContact: A contact to exclude; if this contact is in
                               the list of returned values, it will be
                               discarded before returning. If a str is
                               passed as this argument, it must be the
                               contact's ID.
        @type excludeContact: guid.GUIDMixin or str or unicode
        @return: The first `count` contacts in the contact list.
                 This amount is capped by the available contacts
                 and the bucket size, of course. If no contacts
                 are present, an empty list is returned.
        @rtype: list of guid.GUIDMixin
        """
        currentLen = len(self)
        if not currentLen:
            return []
        if count <= 0:
            count = currentLen
        else:
            count = min(count, currentLen)
        # Return no more contacts than bucket size.
        count = min(count, constants.k)
        contactList = self.contacts[:count]
        if excludeContact is not None:
            try:
                # NOTE: If the excludeContact is removed, the resulting
                # list has one less contact than expected. Not sure if
                # this is a bug.
                contactList.remove(excludeContact)
            except ValueError:
                self.log.debug(
                    '[kbucket.getContacts() warning] '
                    'tried to exclude non-existing contact (%s)',
                    excludeContact
                )
        return contactList
    def removeContact(self, contact):
        """
        Remove given contact from contact list.
        @param contact: The ID of the contact to remove.
        @type contact: guid.GUIDMixin or str or unicode
        @raise ValueError: The specified contact is not in this bucket.
        """
        self.contacts.remove(contact)
    def keyInRange(self, key):
        """
        Tests whether the specified node ID is in the range of the ID
        space covered by this KBucket (in other words, it returns
        whether or not the specified key should be placed in this KBucket.
        @param key: The ID to test.
        @type key: guid.GUIDMixin or hex or int
        @return: True if key is in this KBucket's range, False otherwise.
        @rtype: bool
        """
        if isinstance(key, guid.GUIDMixin):
            key = key.guid
        if isinstance(key, basestring):
            # Hex-string IDs are compared numerically.
            key = long(key, 16)
        return self.rangeMin <= key < self.rangeMax
| kordless/OpenBazaar | node/kbucket.py | Python | mit | 5,992 |
#
# o o
# 8
# .oPYo. .oPYo. odYo. o8P o8 .oPYo. odYo. .oPYo. .oPYo.
# Yb.. 8oooo8 8' `8 8 8 8oooo8 8' `8 8 ' 8oooo8
# 'Yb. 8. 8 8 8 8 8. 8 8 8 . 8.
# `YooP' `Yooo' 8 8 8 8 `Yooo' 8 8 `YooP' `Yooo'
# :.....::.....:..::..::..::..:.....:..::..:.....::.....:
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# Copyright Yazan Obeidi, 2017
#
# python.conciousness.sentience - Self awareness and reflection
#
__author__ = 'yazan'
__version__ = '0.0.1'
__licence__ = 'Apache V2'
# Bug fix: the original read `if __name__ = '__main__':` — a single '='
# (assignment) in the condition, which is a SyntaxError and prevented the
# module from even being imported.
if __name__ == '__main__':
    pass
import os
import sys
class Module1(object):
    """Empty placeholder class used by the dependency-generator samples."""
| ashishb/python_dep_generator | samples/module1.py | Python | mit | 51 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__="EIXXIE"
#Add the MOS folder to the interpreter's module search path (sys.path)
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from sqlalchemy import orm,or_, and_, desc
from sqlalchemy import Table, Column, Integer, String
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from flask.ext.login import UserMixin
from functools import partial
from mos.utils.database import Base,engine,db_session
from mos.utils.functions import get_now_time,Bool2Info
from mos import db
#
# User table definition
class User(UserMixin,Base):
    """ORM model for an application user (table ``users``), mixing
    Flask-Login's UserMixin into the project's declarative Base."""
    __tablename__ = 'users'
    id = db.Column('id', Integer, primary_key=True)
    fullname = db.Column('fullname', String(50))
    username = db.Column('username', String(20), unique=True)
    # NOTE(review): passwords appear to be stored in plain text — confirm,
    # and consider storing a hash instead.
    password = db.Column('password', String(20))
    email = db.Column('email', String(30))
    comments = db.Column('comments', String(200))
    is_admin = db.Column('is_admin', String(2))
    is_active = db.Column('is_active', String(2))
    lasttime = db.Column('lasttime', String(30))
    # NOTE(review): the raw-SQL helpers below build statements by string
    # concatenation; if any id ever originates from user input this is SQL
    # injection — switch to bound parameters. TODO confirm id provenance.
    # Check whether this user belongs to the group with the given id.
    def isingroup(self,group_id):
        result =[]
        conn = engine.connect()
        sql = 'select * from uig where group_id = \'' + str(group_id) + '\' and user_id=\'' + str(self.id) +'\''
        rs = conn.execute(sql)
        for r in rs:
            result.append(r)
        if len(result)==0:
            return False
        else:
            return True
    # List [group_id, groupname] for every group this user belongs to.
    def has_groups(self):
        groups =[]
        conn = engine.connect()
        sql = 'select groups.id as groupid,groupname FROM groups CROSS JOIN uig where groups.id = uig.group_id and uig.user_id = \'' + \
            str(self.id) + '\'' + 'order by groupname'
        rs = conn.execute(sql)
        for r in rs:
            groups.append([r.groupid,r.groupname])
        return groups
    # Add this user to the given group; False when already a member.
    def joingroup(self,group_id):
        if not self.isingroup(group_id):
            conn = engine.connect()
            sql = 'insert into uig(\'group_id\',\'user_id\',\'lasttime\') values(\'' + str(group_id) + '\',' + '\'' + str(self.id) +'\',' + '\'2012-03-31 20:58:40\')'
            rs = conn.execute(sql)
            return True
        else:
            return False
    # Remove this user from the given group; False when not a member.
    def leavegroup(self,group_id):
        if self.isingroup(group_id):
            conn = engine.connect()
            sql = 'delete from uig where group_id = \'' + str(group_id) + '\' and user_id=\'' + str(self.id) +'\''
            rs = conn.execute(sql)
            return True
        else:
            return False
    # List this user's repository permissions as [' ', repo, item, type] rows.
    def has_auths(self):
        userauths =[]
        conn = engine.connect()
        sql = 'select reponame,authitem,authtype from auth_users cross join users,repos,auth_items where auth_users.user_id=users.id and auth_users.authitem_id = auth_items.id and auth_items.repo_id = repos.id and users.id = \'' + str(self.id) +'\' order by reponame,authitem,authtype'
        rs = conn.execute(sql)
        for r in rs:
            userauths.append([' ',r.reponame,r.authitem,r.authtype])
        return userauths
    def __init__(self, fullname=None, username=None, password=None, email=None,comments=None,is_admin=0,is_active=1,lasttime=None):
        #self.id = id
        self.fullname = fullname
        self.username = username
        self.password = password
        self.email = email
        self.comments = comments
        self.is_admin = is_admin
        self.is_active = is_active
        self.lasttime = lasttime
    def is_authenticated(self):
        return True
    # NOTE(review): this method shadows the `is_active` column declared above
    # (the class attribute is replaced at class-creation time), so the column
    # mapping is likely broken — probably a bug; confirm intent.
    def is_active(self):
        return True
    # NOTE(review): same shadowing problem as is_active; here `self.is_admin`
    # resolves to this bound method, not the column value — confirm intent.
    def is_admin(self):
        return self.is_admin
    def is_anonymous(self):
        return False
    def get_id(self):
        try:
            return unicode(self.id) # python 2
        except NameError:
            return str(self.id) # python 3
    #def groups(self):
    #    return db.session.query(Group).filter_by(id = self.id).all()
    def __repr__(self):
        return '%r,%r,%r,%r,%r,%r,%r,%r' % (self.fullname,self.username,self.password,self.email,self.comments,\
            self.is_admin,self.is_active,self.lasttime)
def getRepo(columns=None):
    """Build a Repo query, optionally restricted to the given columns."""
    query = Repo.query
    if columns:
        query = query.options(orm.load_only(*columns))
    return query
def getRepoFactory(columns=None):
    """Return a zero-argument callable that evaluates getRepo(columns=columns)
    lazily (e.g. for WTForms QuerySelectField query_factory)."""
    factory = partial(getRepo, columns=columns)
    return factory
def getGroup(columns=None):
    """Build a Group query, optionally restricted to the given columns."""
    query = Group.query
    if columns:
        query = query.options(orm.load_only(*columns))
    return query
def getGroupFactory(columns=None):
    """Return a zero-argument callable that evaluates getGroup(columns=columns)
    lazily (e.g. for WTForms QuerySelectField query_factory)."""
    factory = partial(getGroup, columns=columns)
    return factory
#Group table definition
class Group(Base):
    """ORM model for a user group (table ``groups``).

    NOTE(review): like User, the raw-SQL helpers below concatenate ids into
    SQL strings — prefer bound parameters if ids can be user-supplied.
    """
    __tablename__ = 'groups'
    id = db.Column('id', Integer, primary_key=True)
    groupname = db.Column('groupname', String(20), unique=True)
    comments = db.Column('comments', String(200))
    status = db.Column('status', String(2))
    lasttime = db.Column('lasttime', String(30))
    # Check whether this group contains the user with the given id.
    def ishavauser(self,user_id):
        result =[]
        conn = engine.connect()
        sql = 'select * from uig where user_id = \'' + str(user_id) + '\' and group_id=\'' + str(self.id) +'\''
        rs = conn.execute(sql)
        for r in rs:
            result.append(r)
        if len(result)==0:
            return False
        else:
            return True
    # List [user_id, fullname, username] for each member of this group.
    def has_users(self):
        users =[]
        conn = engine.connect()
        sql = 'select users.id as userid,fullname,username FROM users CROSS JOIN uig where users.id = uig.user_id and uig.group_id = \'' + \
            str(self.id) + '\'' + 'order by username'
        rs = conn.execute(sql)
        for r in rs:
            users.append([r.userid,r.fullname,r.username])
        return users
    # Add the given user to this group; False when already a member.
    def joinuser(self,user_id):
        if not self.ishavauser(user_id):
            conn = engine.connect()
            sql = 'insert into uig(\'group_id\',\'user_id\',\'lasttime\') values(\'' + str(self.id) + '\',' + '\'' + str(user_id) +'\',' + '\'2012-03-31 20:58:40\')'
            rs = conn.execute(sql)
            return True
        else:
            return False
    # List this group's repository permissions.
    def has_auths(self):
        groupauths =[]
        conn = engine.connect()
        sql = 'select groupname,reponame,authitem,authtype from auth_groups cross join groups,repos,auth_items where auth_groups.group_id=groups.id and auth_groups.authitem_id = auth_items.id and auth_items.repo_id = repos.id and groups.id = \'' + str(self.id) +'\' order by groupname,reponame,authitem,authtype'
        rs = conn.execute(sql)
        for r in rs:
            groupauths.append([r.groupname,r.reponame,r.authitem,r.authtype])
        return groupauths
    def __init__(self, groupname=None, comments=None,status=1,lasttime=None):
        #self.id = id
        self.groupname = groupname
        self.comments = comments
        self.status = status
        self.lasttime = lasttime
    def is_active(self):
        return True
    def get_id(self):
        try:
            return unicode(self.id) # python 2
        except NameError:
            return str(self.id) # python 3
    def __repr__(self):
        return '%r,%r,%r,%r' % (self.groupname,self.comments,self.status,self.lasttime)
#SVN repository table definition
class Repo(Base):
    """ORM model for an SVN repository (table ``repos``)."""
    __tablename__ = 'repos'
    id = Column('id', Integer, primary_key=True)
    reponame = Column('reponame', String(50))
    comments = Column('comments', String(200))
    is_active = Column('is_active', String(2))
    lasttime = Column('lasttime', String(30))
    # List the auth items configured under this repository.
    # NOTE(review): SQL built by string concatenation — use bound parameters
    # if self.id can be attacker-influenced.
    def has_Authitems(self):
        authitems=[]
        conn = engine.connect()
        sql = 'select authitem FROM repos CROSS JOIN auth_items where auth_items.repo_id = repos.id and repos.id = \'' + \
             str(self.id) + '\'' + 'order by authitem'
        rs = conn.execute(sql)
        for r in rs:
            authitems.append([r.authitem])
        return authitems
    def __init__(self, reponame=None,comments=None,is_active=1,lasttime=None,authitems=None):
        #self.id = id
        self.reponame = reponame
        self.comments = comments
        self.is_active = is_active
        self.lasttime = lasttime
        # Bug fix: `authitems` was accepted but never stored, so __repr__
        # crashed with AttributeError when reading self.authitems.
        self.authitems = authitems
    def get_id(self):
        try:
            return unicode(self.id) # python 2
        except NameError:
            return str(self.id) # python 3
    def __repr__(self):
        return '%r,%r,%r,%r,%r' % (self.reponame,self.comments,self.is_active,self.lasttime,self.authitems)
#Auth item (configuration item) table definition
class AuthItem(Base):
    """ORM model for a repository auth item (table ``auth_items``) and the
    raw-SQL helpers that grant/revoke/list user and group permissions on it.

    NOTE(review): all helpers below concatenate ids into SQL strings —
    prefer bound parameters if any id can be user-supplied.
    """
    __tablename__ = 'auth_items'
    id = Column('id', Integer, primary_key=True)
    repo_id = Column('repo_id', String(50))
    authitem = Column('authitem', String(150))
    #repo_id = db.Column(db.Integer, db.ForeignKey('repos.id'))
    # Check whether this item already has a permission row for the user.
    def is_hasuser(self,user_id):
        result = []
        conn = engine.connect()
        sql = 'select * from auth_users where authitem_id = \'' + str(self.id) + '\' and user_id= \'' + str(user_id) +'\''
        rs = conn.execute(sql)
        for r in rs:
            result.append(r)
        if len(result)==0:
            return False
        else:
            return True
    # Check whether this item already has a permission row for the group.
    def is_hasgroup(self,group_id):
        result = []
        conn = engine.connect()
        sql = 'select * from auth_groups where authitem_id = \'' + str(self.id) + '\' and group_id= \'' + str(group_id) +'\''
        rs = conn.execute(sql)
        for r in rs:
            result.append(r)
        if len(result)==0:
            return False
        else:
            return True
    # Grant (insert) or update a user's permission on this item.
    def joinuser(self,user_id,authtype):
        conn = engine.connect()
        if not self.is_hasuser(user_id):
            sql = 'insert into auth_users(\'authitem_id\',\'user_id\',\'authtype\',\'lasttime\') values(\'' +\
            str(self.id) + '\',' + '\'' + str(user_id) +'\',' + '\'' + str(authtype) +'\',' + '\'2012-03-31 20:58:40\')'
        else:
            sql = 'update auth_users set authtype = \'' + str(authtype) +'\'' + ' where authitem_id = \'' + str(self.id) + '\' and user_id= \'' + str(user_id) +'\''
        rs = conn.execute(sql)
    # Grant (insert) or update a group's permission on this item.
    def joingroup(self,group_id,authtype):
        conn = engine.connect()
        if not self.is_hasgroup(group_id):
            sql = 'insert into auth_groups(\'authitem_id\',\'group_id\',\'authtype\',\'lasttime\') values(\'' + str(self.id) + '\',' + '\'' + str(group_id) +'\',' + '\'' + str(authtype) +'\',' + '\'2012-03-31 20:58:40\')'
        else:
            sql = 'update auth_groups set authtype = \'' + str(authtype) +'\'' + ' where authitem_id = \'' + str(self.id) + '\' and group_id= \'' + str(group_id) +'\''
        rs = conn.execute(sql)
    # Remove the given user's permission on this item.
    # (The original Chinese comment said "group" here, but the code clearly
    # operates on auth_users.)
    def removeuser(self,user_id):
        conn = engine.connect()
        sql = 'delete from auth_users where user_id = \'' + str(user_id) + '\' and authitem_id=\'' + str(self.id) +'\''
        rs = conn.execute(sql)
        return True
    # Remove the given group's permission on this item.
    def removegroup(self,group_id):
        conn = engine.connect()
        sql = 'delete from auth_groups where group_id = \'' + str(group_id) + '\' and authitem_id=\'' + str(self.id) +'\''
        rs = conn.execute(sql)
        return True
    # List the user grants on this item as [id, fullname, username, authtype].
    def has_authusers(self):
        authusers=[]
        conn = engine.connect()
        sql = 'select users.id as userid,fullname,username,authtype from auth_users cross join users,\
        repos,auth_items where auth_users.user_id=users.id and auth_users.authitem_id = auth_items.id and \
        auth_items.repo_id = repos.id and auth_items.id = \'' + str(self.id) + '\'' + 'order by username'
        rs = conn.execute(sql)
        for r in rs:
            authusers.append([r.userid,r.fullname,r.username,r.authtype])
        return authusers
    # List the group grants on this item as [id, groupname, authtype].
    def has_authgroups(self):
        authgroups=[]
        conn = engine.connect()
        sql = 'select groups.id as groupid,groupname,authtype from auth_groups cross join groups,\
        repos,auth_items where auth_groups.group_id=groups.id and auth_groups.authitem_id = auth_items.id and \
        auth_items.repo_id = repos.id and auth_items.id = \'' + str(self.id) + '\'' + 'order by groupname'
        rs = conn.execute(sql)
        for r in rs:
            authgroups.append([r.groupid,r.groupname,r.authtype])
        return authgroups
    def __init__(self, repo_id=None,authitem=None):
        #self.id = id
        self.repo_id = repo_id
        self.authitem = authitem
    def get_id(self):
        try:
            return unicode(self.id) # python 2
        except NameError:
            return str(self.id) # python 3
    def __repr__(self):
        return '%r,%r' % (self.repo_id,self.authitem)
#Permission table definition
class AuthPerm(Base):
    """ORM model for a single permission entry (table ``authperms``)."""
    __tablename__ = 'authperms'
    id = Column('id', Integer, primary_key=True)
    authitem_id = Column('authitem_id', Integer)
    authtype = Column('authtype', String(10))
    authdata = Column('authdata', String(20))
    authperm = Column('authperm', String(5))
    comments = Column('comments', String(200))
    lasttime = Column('lasttime', String(30))
    def __init__(self, authitem_id=None, authtype=None, authdata=None, authperm=None,comments=None,lasttime=None):
        #self.id = id
        self.authitem_id = authitem_id
        self.authtype = authtype
        self.authdata = authdata
        self.authperm = authperm
        self.comments = comments
        self.lasttime = lasttime
    def get_id(self):
        try:
            return unicode(self.id) # python 2
        except NameError:
            return str(self.id) # python 3
    def __repr__(self):
        # Bug fix: the original read `sself.authitem_id` (typo), which raised
        # NameError whenever repr() was taken on an AuthPerm instance.
        return '%r,%r,%r,%r,%r,%r' % (self.authitem_id,self.authtype,self.authdata,self.authperm,self.comments,self.lasttime)
# coding: utf-8
import json
import os
import re
from xml.sax import saxutils
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import get_storage_class
from django.urls import reverse
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy, ugettext as _
from guardian.shortcuts import (
assign_perm,
get_perms_for_model
)
from taggit.managers import TaggableManager
from onadata.apps.logger.fields import LazyDefaultBooleanField
from onadata.apps.logger.xform_instance_parser import XLSFormError
from onadata.koboform.pyxform_utils import convert_csv_to_xls
from onadata.libs.constants import (
CAN_ADD_SUBMISSIONS,
CAN_VALIDATE_XFORM,
CAN_DELETE_DATA_XFORM,
CAN_TRANSFER_OWNERSHIP,
)
from onadata.libs.models.base_model import BaseModel
from onadata.libs.utils.hash import get_hash
XFORM_TITLE_LENGTH = 255  # max characters kept from the form's <h:title>
title_pattern = re.compile(r"<h:title>([^<]+)</h:title>")  # captures the title text
def upload_to(instance, filename):
    """Storage path for an uploaded XLSForm: <username>/xls/<basename>."""
    basename = os.path.split(filename)[1]
    return os.path.join(instance.user.username, 'xls', basename)
class XForm(BaseModel):
    """A published XLSForm: the raw XLS upload plus its XML/JSON renderings
    and submission bookkeeping (counts, timestamps, sharing flags)."""
    CLONED_SUFFIX = '_cloned'
    MAX_ID_LENGTH = 100
    xls = models.FileField(upload_to=upload_to, null=True)
    json = models.TextField(default='')
    description = models.TextField(default='', null=True)
    xml = models.TextField()
    user = models.ForeignKey(User, related_name='xforms', null=True, on_delete=models.CASCADE)
    require_auth = models.BooleanField(default=False)
    shared = models.BooleanField(default=False)
    shared_data = models.BooleanField(default=False)
    downloadable = models.BooleanField(default=True)
    encrypted = models.BooleanField(default=False)
    id_string = models.SlugField(
        editable=False,
        verbose_name=ugettext_lazy("ID"),
        max_length=MAX_ID_LENGTH
    )
    title = models.CharField(editable=False, max_length=XFORM_TITLE_LENGTH)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    last_submission_time = models.DateTimeField(blank=True, null=True)
    has_start_time = models.BooleanField(default=False)
    uuid = models.CharField(max_length=32, default='', db_index=True)
    # Regexes over the form XML: split around the <instance id="..."> node,
    # and extract the instance id respectively.
    uuid_regex = re.compile(r'(<instance>.*?id="[^"]+">)(.*</instance>)(.*)',
                            re.DOTALL)
    instance_id_regex = re.compile(r'<instance>.*?id="([^"]+)".*</instance>',
                                   re.DOTALL)
    # Positions used when splicing a uuid into the XML — presumably indexes
    # into the split parts; confirm against callers outside this file.
    uuid_node_location = 2
    uuid_bind_location = 4
    instances_with_geopoints = models.BooleanField(default=False)
    num_of_submissions = models.IntegerField(default=0)
    tags = TaggableManager()
    has_kpi_hooks = LazyDefaultBooleanField(default=False)
    kpi_asset_uid = models.CharField(max_length=32, null=True)
    class Meta:
        app_label = 'logger'
        unique_together = (("user", "id_string"),)
        verbose_name = ugettext_lazy("XForm")
        verbose_name_plural = ugettext_lazy("XForms")
        ordering = ("id_string",)
        permissions = (
            (CAN_ADD_SUBMISSIONS, _('Can make submissions to the form')),
            (CAN_TRANSFER_OWNERSHIP, _('Can transfer form ownership.')),
            (CAN_VALIDATE_XFORM, _('Can validate submissions')),
            (CAN_DELETE_DATA_XFORM, _('Can delete submissions')),
        )
def file_name(self):
return self.id_string + ".xml"
def url(self):
return reverse(
"download_xform",
kwargs={
"username": self.user.username,
"id_string": self.id_string
}
)
def data_dictionary(self):
from onadata.apps.viewer.models.data_dictionary import\
DataDictionary
return DataDictionary.objects.get(pk=self.pk)
    @property
    def has_instances_with_geopoints(self):
        """Whether any stored submission carried a geopoint (cached flag column)."""
        return self.instances_with_geopoints
    @property
    def kpi_hook_service(self):
        """
        Return the "kpi_hook" RestService for this form if one exists.
        An XForm is expected to have at most one such service.
        :return: RestService or None
        """
        return self.restservices.filter(name="kpi_hook").first()
def _set_id_string(self):
matches = self.instance_id_regex.findall(self.xml)
if len(matches) != 1:
raise XLSFormError(_("There should be a single id string."))
self.id_string = matches[0]
def _set_title(self):
self.xml = smart_text(self.xml)
text = re.sub(r'\s+', ' ', self.xml)
matches = title_pattern.findall(text)
title_xml = matches[0][:XFORM_TITLE_LENGTH]
if len(matches) != 1:
raise XLSFormError(_("There should be a single title."), matches)
if self.title and title_xml != self.title:
title_xml = self.title[:XFORM_TITLE_LENGTH]
title_xml = saxutils.escape(title_xml)
self.xml = title_pattern.sub(
"<h:title>%s</h:title>" % title_xml, self.xml)
self.title = title_xml
def _set_description(self):
self.description = self.description \
if self.description and self.description != '' else self.title
def _set_encrypted_field(self):
if self.json and self.json != '':
json_dict = json.loads(self.json)
if 'submission_url' in json_dict and 'public_key' in json_dict:
self.encrypted = True
else:
self.encrypted = False
    def update(self, *args, **kwargs):
        # Persist via the parent save(), bypassing XForm.save()'s
        # title/id_string derivation and validation.
        super().save(*args, **kwargs)
    def save(self, *args, **kwargs):
        """Validate and persist the form.

        Syncs title, description, id_string and the encrypted flag from
        the XML before saving.

        Raises:
            XLSFormError: when an already-saved form's id_string changes,
                or when STRICT mode rejects a non-slug id_string.
        """
        self._set_title()
        self._set_description()
        old_id_string = self.id_string
        self._set_id_string()
        self._set_encrypted_field()
        # check if we have an existing id_string,
        # if so, the one must match but only if xform is NOT new
        if self.pk and old_id_string and old_id_string != self.id_string:
            raise XLSFormError(
                _("Your updated form's id_string '%(new_id)s' must match "
                "the existing forms' id_string '%(old_id)s'." %
                {'new_id': self.id_string, 'old_id': old_id_string}))
        # STRICT defaults to True when the setting is absent.
        if getattr(settings, 'STRICT', True) and \
                not re.search(r"^[\w-]+$", self.id_string):
            raise XLSFormError(_('In strict mode, the XForm ID must be a '
                                 'valid slug and contain no spaces.'))
        super().save(*args, **kwargs)
def __str__(self):
return getattr(self, "id_string", "")
    def submission_count(self, force_update=False):
        """Return the cached number of submissions.

        Recounts from the database (and persists the new count) when the
        cache is 0 or ``force_update`` is True.
        """
        if self.num_of_submissions == 0 or force_update:
            count = self.instances.count()
            self.num_of_submissions = count
            self.save(update_fields=['num_of_submissions'])
        return self.num_of_submissions
    # Column label shown in the Django admin for this computed value.
    submission_count.short_description = ugettext_lazy("Submission Count")
    def geocoded_submission_count(self):
        """Number of geocoded submissions."""
        # Counts only instances that have a geometry attached.
        return self.instances.filter(geom__isnull=False).count()
    def time_of_last_submission(self):
        """Return the datetime of the latest submission, lazily backfilling
        the cached field when it is unset despite existing submissions."""
        if self.last_submission_time is None and self.num_of_submissions > 0:
            try:
                last_submission = self.instances.latest("date_created")
            except ObjectDoesNotExist:
                # Counter says there are submissions but none exist; keep None.
                pass
            else:
                self.last_submission_time = last_submission.date_created
                self.save()
        return self.last_submission_time
    def time_of_last_submission_update(self):
        """Datetime of the most recently modified instance, or None when
        the form has no instances at all."""
        try:
            # We don't need to filter on `deleted_at` field anymore.
            # Instances are really deleted and not flagged as deleted.
            return self.instances.latest("date_modified").date_modified
        except ObjectDoesNotExist:
            pass
    @property
    def md5_hash(self):
        # Hash of the XML content; the algorithm is whatever get_hash
        # implements (name suggests MD5 — confirm in the helper).
        return get_hash(self.xml)
    @property
    def can_be_replaced(self):
        """True when the form has no submissions yet."""
        # submission_count may be a bound method or a plain value here
        # (presumably overridden somewhere as an attribute — verify);
        # handle both forms.
        if hasattr(self.submission_count, '__call__'):
            num_submissions = self.submission_count()
        else:
            num_submissions = self.submission_count
        return num_submissions == 0
    @classmethod
    def public_forms(cls):
        """Queryset of all publicly shared forms."""
        return cls.objects.filter(shared=True)
    def _xls_file_io(self):
        """
        Pulls the xls file from remote storage
        this should be used sparingly

        Returns a BytesIO with the file contents (CSV sources are first
        converted to XLS); implicitly returns None when no file exists.
        """
        file_path = self.xls.name
        default_storage = get_storage_class()()
        if file_path != '' and default_storage.exists(file_path):
            with default_storage.open(file_path) as ff:
                if file_path.endswith('.csv'):
                    return convert_csv_to_xls(ff.read())
                else:
                    return BytesIO(ff.read())
    @property
    def settings(self):
        """
        Mimic Asset settings.
        :return: Object

        NOTE: this instance attribute is distinct from the
        django.conf.settings module used elsewhere in this file.
        """
        # As soon as we need to add custom validation statuses in Asset settings,
        # validation in add_validation_status_to_instance
        # (kobocat/onadata/apps/api/tools.py) should still work
        default_validation_statuses = getattr(settings, "DEFAULT_VALIDATION_STATUSES", [])
        # Later purpose, default_validation_statuses could be merged with a custom validation statuses dict
        # for example:
        # self._validation_statuses.update(default_validation_statuses)
        return {
            "validation_statuses": default_validation_statuses
        }
def update_profile_num_submissions(sender, instance, **kwargs):
    """post_delete handler: subtract the deleted form's submission count
    from its owner's profile counter, clamping the result at zero."""
    profile_qs = User.profile.get_queryset()
    try:
        # Lock the profile row to avoid racing concurrent updates.
        profile = profile_qs.select_for_update()\
            .get(pk=instance.user.profile.pk)
    except ObjectDoesNotExist:
        pass
    else:
        profile.num_of_submissions -= instance.num_of_submissions
        if profile.num_of_submissions < 0:
            profile.num_of_submissions = 0
        profile.save(update_fields=['num_of_submissions'])
# Wire the handler to XForm deletions.
post_delete.connect(update_profile_num_submissions, sender=XForm,
                    dispatch_uid='update_profile_num_submissions')
def set_object_permissions(sender, instance=None, created=False, **kwargs):
    """post_save handler: grant the owner every model permission on a
    newly created XForm (guardian object-level permissions)."""
    if created:
        for perm in get_perms_for_model(XForm):
            assign_perm(perm.codename, instance.user, instance)
post_save.connect(set_object_permissions, sender=XForm,
                  dispatch_uid='xform_object_permissions')
| kobotoolbox/kobocat | onadata/apps/logger/models/xform.py | Python | bsd-2-clause | 10,599 |
"""Bosonic quantum operators."""
from warnings import warn
from sympy.core.compatibility import u
from sympy import Add, Mul, Pow, Integer, exp, sqrt, conjugate
from sympy.physics.quantum import Operator, Commutator, AntiCommutator, Dagger
from sympy.physics.quantum import HilbertSpace, FockSpace, Ket, Bra, IdentityOperator
from sympy.functions.special.tensor_functions import KroneckerDelta
__all__ = [
'BosonOp',
'BosonFockKet',
'BosonFockBra',
'BosonCoherentKet',
'BosonCoherentBra'
]
class BosonOp(Operator):
    """A bosonic operator that satisfies [a, Dagger(a)] == 1.
    Parameters
    ==========
    name : str
        A string that labels the bosonic mode.
    annihilation : bool
        A bool that indicates if the bosonic operator is an annihilation (True,
        default value) or creation operator (False)
    Examples
    ========
    >>> from sympy.physics.quantum import Dagger, Commutator
    >>> from sympy.physics.quantum.boson import BosonOp
    >>> a = BosonOp("a")
    >>> Commutator(a, Dagger(a)).doit()
    1
    """
    @property
    def name(self):
        # Label of the bosonic mode (first constructor argument).
        return self.args[0]
    @property
    def is_annihilation(self):
        # True for an annihilation operator, False for a creation operator.
        return bool(self.args[1])
    @classmethod
    def default_args(self):
        # Defaults used when the operator is created without arguments.
        return ("a", True)
    def __new__(cls, *args, **hints):
        # Accept BosonOp("a") or BosonOp("a", flag); normalize the flag to
        # a sympy Integer so the expression tree stays canonical.
        if not len(args) in [1, 2]:
            raise ValueError('1 or 2 parameters expected, got %s' % args)
        if len(args) == 1:
            args = (args[0], Integer(1))
        if len(args) == 2:
            args = (args[0], Integer(args[1]))
        return Operator.__new__(cls, *args)
    def _eval_commutator_BosonOp(self, other, **hints):
        # Commutator of two bosonic operators of the same mode; None means
        # "leave unevaluated" for sympy.
        if self.name == other.name:
            # [a^\dagger, a] = -1
            if not self.is_annihilation and other.is_annihilation:
                return Integer(-1)
        elif 'independent' in hints and hints['independent']:
            # [a, b] = 0
            return Integer(0)
        return None
    def _eval_commutator_FermionOp(self, other, **hints):
        # Bosonic and fermionic operators always commute.
        return Integer(0)
    def _eval_anticommutator_BosonOp(self, other, **hints):
        if 'independent' in hints and hints['independent']:
            # {a, b} = 2 * a * b, because [a, b] = 0
            return 2 * self * other
        return None
    def _eval_adjoint(self):
        # Dagger toggles annihilation <-> creation.
        return BosonOp(str(self.name), not self.is_annihilation)
    def __mul__(self, other):
        # Multiplying by the 2-dimensional identity operator is a no-op.
        if other == IdentityOperator(2):
            return self
        if isinstance(other, Mul):
            # Pull commutative factors out front while preserving the
            # order of the non-commutative ones.
            args1 = tuple(arg for arg in other.args if arg.is_commutative)
            args2 = tuple(arg for arg in other.args if not arg.is_commutative)
            x = self
            for y in args2:
                x = x * y
            return Mul(*args1) * x
        return Mul(self, other)
    def _print_contents_latex(self, printer, *args):
        # LaTeX: bare name for annihilation, daggered name for creation.
        if self.is_annihilation:
            return r'{%s}' % str(self.name)
        else:
            return r'{{%s}^\dag}' % str(self.name)
    def _print_contents(self, printer, *args):
        if self.is_annihilation:
            return r'%s' % str(self.name)
        else:
            return r'Dagger(%s)' % str(self.name)
    def _print_contents_pretty(self, printer, *args):
        # Pretty printer: creation operators get a Unicode dagger superscript.
        from sympy.printing.pretty.stringpict import prettyForm
        pform = printer._print(self.args[0], *args)
        if self.is_annihilation:
            return pform
        else:
            return pform**prettyForm(u('\N{DAGGER}'))
class BosonFockKet(Ket):
    """Fock state ket for a bosonic mode.
    Parameters
    ==========
    n : Number
        The Fock state number.
    """
    def __new__(cls, n):
        return Ket.__new__(cls, n)
    @property
    def n(self):
        # Occupation number of the Fock state.
        return self.label[0]
    @classmethod
    def dual_class(self):
        # Bra class dual to this ket.
        return BosonFockBra
    @classmethod
    def _eval_hilbert_space(cls, label):
        return FockSpace()
    def _eval_innerproduct_BosonFockBra(self, bra, **hints):
        # Fock states are orthonormal: <m|n> = delta(m, n).
        return KroneckerDelta(self.n, bra.n)
    def _apply_operator_BosonOp(self, op, **options):
        # a|n> = sqrt(n)|n-1>;  Dagger(a)|n> = sqrt(n+1)|n+1>.
        if op.is_annihilation:
            return sqrt(self.n) * BosonFockKet(self.n - 1)
        else:
            return sqrt(self.n + 1) * BosonFockKet(self.n + 1)
class BosonFockBra(Bra):
    """Fock state bra for a bosonic mode.
    Parameters
    ==========
    n : Number
        The Fock state number.
    """
    def __new__(cls, n):
        return Bra.__new__(cls, n)
    @property
    def n(self):
        # Occupation number of the Fock state.
        return self.label[0]
    @classmethod
    def dual_class(self):
        # Ket class dual to this bra.
        return BosonFockKet
    @classmethod
    def _eval_hilbert_space(cls, label):
        return FockSpace()
class BosonCoherentKet(Ket):
    """Coherent state ket for a bosonic mode.
    Parameters
    ==========
    alpha : Number, Symbol
        The complex amplitude of the coherent state.
    """
    def __new__(cls, alpha):
        return Ket.__new__(cls, alpha)
    @property
    def alpha(self):
        # Complex amplitude of the coherent state.
        return self.label[0]
    @classmethod
    def dual_class(self):
        return BosonCoherentBra
    @classmethod
    def _eval_hilbert_space(cls, label):
        return HilbertSpace()
    def _eval_innerproduct_BosonCoherentBra(self, bra, **hints):
        # Overlap of two coherent states:
        # <beta|alpha> = exp(-(|alpha|^2 + |beta|^2 - 2*conj(beta)*alpha)/2)
        if self.alpha == bra.alpha:
            return Integer(1)
        else:
            return exp(-(abs(self.alpha)**2 + abs(bra.alpha)**2 - 2 * conjugate(bra.alpha) * self.alpha)/2)
    def _apply_operator_BosonOp(self, op, **options):
        # Coherent states are eigenstates of the annihilation operator:
        # a|alpha> = alpha|alpha>; creation operators are left unevaluated.
        if op.is_annihilation:
            return self.alpha * self
        else:
            return None
class BosonCoherentBra(Bra):
    """Coherent state bra for a bosonic mode.
    Parameters
    ==========
    alpha : Number, Symbol
        The complex amplitude of the coherent state.
    """
    def __new__(cls, alpha):
        return Bra.__new__(cls, alpha)
    @property
    def alpha(self):
        # Complex amplitude of the coherent state.
        return self.label[0]
    @classmethod
    def dual_class(self):
        return BosonCoherentKet
    def _apply_operator_BosonOp(self, op, **options):
        # Dual of the ket rule: <alpha|Dagger(a) = conj-eigenvalue relation;
        # annihilation operators acting from the right are left unevaluated.
        if not op.is_annihilation:
            return self.alpha * self
        else:
            return None
| beni55/sympy | sympy/physics/quantum/boson.py | Python | bsd-3-clause | 6,207 |
import sys, random, time
from lsst.sims.catalogs.generation.db import jobDB
def howManyJobs(eM, tableId):
    # Return the current job count stored in the job-state table under the
    # key '<tableId>NumJobs' (0 when the key has never been set).
    tableStr = str(tableId)
    t0 = eM.queryState(tableStr + 'NumJobs')
    if t0 == None: t0 = 0  # NOTE(review): prefer 't0 is None'
    else: t0 = int(t0)
    print 'howManyJobs: Current num: ', t0
    return t0
def addJob(eM, tableId):
    # Increment the persisted job counter by one and dump all states.
    tableStr = str(tableId)
    t0 = eM.queryState(tableStr + 'NumJobs')
    if t0 == None: t0 = 0
    print 'addJob: Current num: ', t0
    t1 = int(t0) + 1
    eM.updateState(tableStr + 'NumJobs', str(t1))
    print 'addJob: New num: ', t1
    eM.showStates()
def removeJob(eM, tableId):
tableStr = str(tableId)
t0 = eM.queryState(tableStr + 'NumJobs')
print 'addJob: Current num: ', t0
t1 = int(t0) - 1
eM.updateState(tableStr + 'NumJobs', str(t1))
print 'addJob: New num: ', t1
def throttle(eM, tableId, maxNumJobs, throttleTime):
    # Block until the persisted job count drops below maxNumJobs, polling
    # every throttleTime seconds.
    print 'throttle: maxNumJobs is ', maxNumJobs
    done = False
    while done == False:
        numJobs = howManyJobs(eM, tableId)
        print 'throttle: numJobs is ', numJobs
        if numJobs >= maxNumJobs:
            print 'Max reached; sleeping...'
            time.sleep(throttleTime)
            print 'Waking to check again.'
        else:
            done = True
# Script entry: manual exercise of the throttle helpers against the
# job-state table named on the command line.
tableId = sys.argv[1]
print 'Using tableId: ', tableId
eM = jobDB.JobState(tableId)
for i in range(100):
    howManyJobs(eM, tableId)
    print '--------'
    throttle(eM, tableId, 100, 60)
    addJob(eM, tableId)
    print '--------'
| lsst/sims_catalogs_generation | python/lsst/sims/catalogs/generation/deprecated/jobAllocator/myThrottle.py | Python | gpl-3.0 | 1,494 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Results class to wrap a query and allow for searching."""
import json
from flask import current_app
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
def dotter(d, key, dots):
    """Collect every property path of a JSON-schema fragment in dotted
    notation (e.g. ``author``, ``author.full_name``) into ``dots``.

    ``dots`` is mutated in place and also returned.
    """
    if not isinstance(d, dict):
        # Leaf value: the accumulated key is a complete path.
        dots.append(key)
        return dots
    if 'items' in d:
        # Array node: record the path itself, then descend into the items.
        dots.append(key)
        dotter(d['items'], key, dots)
    elif 'properties' in d:
        dotter(d['properties'], key, dots)
    else:
        for name, child in d.items():
            dotter(child, key + '.' + name, dots)
    return dots
def get_dotted_keys(d, key, dots):
    """Return the set of parent paths of all dotted keys found in ``d``.

    Each path produced by :func:`dotter` gets its leading dot stripped and
    its final component removed; duplicates collapse via the set.
    """
    return {path[1:].rsplit('.', 1)[0] for path in dotter(d, key, dots)}
@lru_cache(maxsize=1000)
def generate_valid_keywords():
    """Parses all sources that contain valid search keywords to a list.

    Merges keywords from three sources — the SEARCH_ELASTIC_KEYWORD_MAPPING
    config, every registered JSON schema, and every Elasticsearch mapping —
    then deduplicates and sorts them by length, longest first.
    NOTE(review): relies on Python 2 semantics (`dict.keys()` returning a
    list, `iteritems`); `valid_keywords` is an unused local.
    """
    valid_keywords = []
    keyword_mapping = current_app.config['SEARCH_ELASTIC_KEYWORD_MAPPING']
    # Get keywords from configuration file
    keywords = keyword_mapping.keys()
    for k in keyword_mapping.values():
        if isinstance(k, dict):
            keywords += k.keys()
    # Get keywords from the json schema
    for path in current_app.extensions['invenio-jsonschemas'].list_schemas():
        data = current_app.extensions['invenio-jsonschemas'].get_schema(path)
        data = data.get('properties')
        dotted_keywords = get_dotted_keys(data, '', [])
        keywords += dotted_keywords
    # Get keywords from elasticsearch mapping
    for name, path in current_app.extensions['invenio-search'].mappings.iteritems():
        with open(path) as data_file:
            data = json.load(data_file)
            data = data.get('mappings').get(name.split('-')[-1]).get('properties')
        dotted_keywords = get_dotted_keys(data, '', [])
        keywords += dotted_keywords
    cleaned_keywords = list(set([k for k in keywords if k is not None]))
    # Sort by longest string descending
    cleaned_keywords.sort(key=len, reverse=True)
    return cleaned_keywords
| jacenkow/inspire-next | inspirehep/modules/search/utils.py | Python | gpl-2.0 | 3,296 |
'''
Created on Jul 7, 2009
@author: Stou Sandalski (stou@icapsid.net)
@license: Public Domain
'''
import math
from OpenGL.GL import *
from OpenGL.GLU import *
from PyQt4 import QtGui
from PyQt4.QtOpenGL import *
class SpiralWidget(QGLWidget):
    '''
    Widget for drawing two spirals.

    The first spiral is drawn in OpenGL immediate mode, the second via
    vertex arrays; both wind inward by shrinking the radius per degree.
    '''
    def __init__(self, parent):
        QGLWidget.__init__(self, parent)
        self.setMinimumSize(500, 500)
    def paintGL(self):
        '''
        Drawing routine
        '''
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        # Draw the spiral in 'immediate mode'
        # WARNING: You should not be doing the spiral calculation inside the loop
        # even if you are using glBegin/glEnd, sin/cos are fairly expensive functions
        # I've left it here as is to make the code simpler.
        radius = 1.0
        x = radius*math.sin(0)
        y = radius*math.cos(0)
        glColor(0.0, 1.0, 0.0)
        glBegin(GL_LINE_STRIP)
        for deg in xrange(1000):  # xrange: this module targets Python 2
            glVertex(x, y, 0.0)
            rad = math.radians(deg)
            radius -= 0.001
            x = radius*math.sin(rad)
            y = radius*math.cos(rad)
        glEnd()
        glEnableClientState(GL_VERTEX_ARRAY)
        spiral_array = []
        # Second Spiral using "array immediate mode" (i.e. Vertex Arrays)
        radius = 0.8
        x = radius*math.sin(0)
        y = radius*math.cos(0)
        glColor(1.0, 0.0, 0.0)
        for deg in xrange(820):
            spiral_array.append([x, y])
            rad = math.radians(deg)
            radius -= 0.001
            x = radius*math.sin(rad)
            y = radius*math.cos(rad)
        glVertexPointerf(spiral_array)
        glDrawArrays(GL_LINE_STRIP, 0, len(spiral_array))
        glFlush()
    def resizeGL(self, w, h):
        '''
        Resize the GL window
        '''
        glViewport(0, 0, w, h)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
    def initializeGL(self):
        '''
        Initialize GL
        '''
        # set viewing projection
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
# You don't need anything below this
class SpiralWidgetDemo(QtGui.QMainWindow):
    ''' Example class for using SpiralWidget'''
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        # Embed the GL widget as the window's central widget.
        widget = SpiralWidget(self)
        self.setCentralWidget(widget)
# Launch the standalone demo window when run as a script.
if __name__ == '__main__':
    app = QtGui.QApplication(['Spiral Widget Demo'])
    window = SpiralWidgetDemo()
    window.show()
    app.exec_()
| JiangXL/ColorMapping | opengl.py | Python | gpl-3.0 | 2,720 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
"""Tests for the TextInput Qml component."""
from autopilot.matchers import Eventually
from textwrap import dedent
from testtools.matchers import Is, Not, Equals
from testtools import skip
import os
from tavastia.tests import TavastiaTestCase
class TextFieldTests(TavastiaTestCase):
    """Tests for TextField component."""
    # Absolute path to the QML fixture autopilot loads for this test case,
    # resolved relative to this module's directory.
    test_qml_file = "%s/%s.qml" % (os.path.dirname(os.path.realpath(__file__)),"TextFieldTests")
    def test_can_select_textfield(self):
        """Must be able to select the Qml TextField component."""
        obj = self.app.select_single('TextField')
        self.assertThat(obj, Not(Is(None)))
| LeoTestard/qt-ubuntu-components | tests/autopilot/tavastia/tests/textfield/test_textfield.py | Python | lgpl-3.0 | 923 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Benjamin Jolivot <bjolivot@gmail.com>
# Inspired by slack module :
# # (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
# # (c) 2016, René Moser <mail@renemoser.net>
# # (c) 2015, Stefan Berggren <nsg@nsg.cc>
# # (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>)
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
module: mattermost
short_description: Send Mattermost notifications
description:
- Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
version_added: "2.3"
author: "Benjamin Jolivot (@bjolivot)"
options:
url:
description:
- Mattermost url (i.e. http://mattermost.yourcompany.com).
required: true
api_key:
description:
- Mattermost webhook api key. Log into your mattermost site, go to
Menu -> Integration -> Incomming Webhook -> Add Incomming Webhook.
This will give you full URL. api_key is the last part.
http://mattermost.example.com/hooks/C(API_KEY)
required: true
text:
description:
- Text to send. Note that the module does not handle escaping characters.
required: true
channel:
description:
- Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
username:
description:
- This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc.
default: Ansible
icon_url:
description:
- Url for the message sender's icon.
default: https://www.ansible.com/favicon.ico
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: yes
choices:
- 'yes'
- 'no'
"""
EXAMPLES = """
- name: Send notification message via Mattermost
mattermost:
url: http://mattermost.example.com
api_key: my_api_key
text: '{{ inventory_hostname }} completed'
- name: Send notification message via Slack all options
mattermost:
url: http://mattermost.example.com
api_key: my_api_key
text: '{{ inventory_hostname }} completed'
channel: notifications
username: 'Ansible on {{ inventory_hostname }}'
icon_url: http://www.example.com/some-image-file.png
"""
RETURN = '''
payload:
description: Mattermost payload
returned: success
type: string
webhook_url:
description: URL the webhook is sent to
returned: success
type: string
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def main():
    """Entry point of the Ansible mattermost notification module.

    Builds the incoming-webhook URL and JSON payload from the module
    parameters and POSTs it; in check mode nothing is sent.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec = dict(
            url = dict(type='str', required=True),
            api_key = dict(type='str', required=True, no_log=True),
            text = dict(type='str', required=True),
            channel = dict(type='str', default=None),
            username = dict(type='str', default='Ansible'),
            icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
            validate_certs = dict(default='yes', type='bool'),
        )
    )
    #init return dict
    result = dict(changed=False, msg="OK")
    #define webhook: <url>/hooks/<api_key>
    webhook_url = "{0}/hooks/{1}".format(module.params['url'],module.params['api_key'])
    result['webhook_url'] = webhook_url
    #define payload; unset optional params are omitted entirely
    payload = { }
    for param in ['text', 'channel', 'username', 'icon_url']:
        if module.params[param] is not None:
            payload[param] = module.params[param]
    payload=module.jsonify(payload)
    result['payload'] = payload
    #http headers
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    #notes:
    #Nothing is done in check mode
    #it'll pass even if your server is down or/and if your token is invalid.
    #If someone finds a good way to check...
    #send request if not in check mode
    if module.check_mode is False:
        response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
        #something's wrong: any non-200 status fails the task
        if info['status'] != 200:
            result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
            module.fail_json(**result)
    #Looks good
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| grimmjow8/ansible | lib/ansible/modules/notification/mattermost.py | Python | gpl-3.0 | 5,276 |
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2008-2015
# Christian Kohlöffel
# Vinzenz Schulz
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from __future__ import division
from math import atan2
import logging
from core.point import Point
from core.arcgeo import ArcGeo
from core.linegeo import LineGeo
from dxfimport.biarc import BiarcClass
logger = logging.getLogger("DxfImport.SplineConvert")
# Global switch for verbose biarc-fitting debug output.
debug_on = False
class Spline2Arcs:
    """Convert a NURBS (degree, knots, weights, control points) into a
    compressed sequence of line and arc geometries via biarc fitting."""
    def __init__(self, degree=0, Knots=[], Weights=[], CPoints=[], tol=0.01, check=1):
        # NOTE(review): mutable default arguments are shared across calls;
        # safe only as long as callers never mutate them.
        # Maximum allowed deviation for the fitted biarc curve.
        self.epsilon = tol
        self.epsilon_high = self.epsilon * 0.1
        self.segments = 50
        # Initialize the NURBS helper class.
        self.NURBS = NURBSClass(degree=degree, Knots=Knots,
                                CPoints=CPoints, Weights=Weights)
        # Validate the NURBS parameters; control points that coincide
        # within the given tolerance are ignored.
        self.NURBS.check_NURBSParameters(tol, check)
        if debug_on:
            logger.debug(self.NURBS)
            logger.debug("Next High accurancy BiarcCurve")
        # High accuracy biarc fitting of the NURBS curve.
        BiarcCurves, self.PtsVec = self.calc_high_accurancy_BiarcCurve()
        logger.debug("Next Analyse and Compress")
        # Compress consecutive biarcs and lines into fewer geometries.
        self.Curve = self.analyse_and_compress(BiarcCurves)
    def analyse_and_compress(self, BiarcCurves):
        """
        analyse_and_compess() - Compress all to one curve

        Flattens the biarc curves into plain geometry lists, merges
        collinear line runs, then merges arc triples into biarcs, and
        concatenates the results into a single list.
        """
        Curves = []
        for BiarcCurve in BiarcCurves:
            Curve = []
            for Biarc in BiarcCurve:
                for geo in Biarc.geos:
                    Curve.append(geo)
            # First merge line runs, then compress arc triples.
            Curve = self.compress_lines(Curve)
            Curve = self.compress_biarcs(Curve)
            Curves += Curve
        return Curves
    def compress_biarcs(self, Curves):
        """Merge triples of consecutive spiral arcs into fitted biarcs.

        Whenever the three most recent geometries are arcs forming a
        monotonically increasing or decreasing spiral with consistent
        orientation, they are replaced by two fitted arcs, provided all
        collected end points stay within tolerance.
        """
        NewCurve = []
        tau = self.epsilon
        Pts = []
        # Loop over all geometries of the curve.
        for geo in Curves:
            NewCurve.append(geo)
            # A merge needs at least three geometries.
            if len(NewCurve) >= 3:
                # Candidate: three consecutive arcs.
                if isinstance(NewCurve[-3], ArcGeo)\
                        and isinstance(NewCurve[-2], ArcGeo)\
                        and isinstance(NewCurve[-1], ArcGeo):
                    Pts.append(geo.Pe)
                    # Increasing spiral (radii grow, orientation consistent).
                    if NewCurve[-3].r <= NewCurve[-2].r <= NewCurve[-1].r\
                            and NewCurve[-3].ext * NewCurve[-2].ext >= 0.0\
                            and NewCurve[-2].ext * NewCurve[-1].ext >= 0.0:
                        anz = len(NewCurve)
                        triarc = NewCurve[anz - 3:anz]
                        Arc0, Arc1 = self.fit_triac_by_inc_biarc(triarc, tau)
                        diff = self.check_diff_to_pts(Pts, Arc0, Arc1)
                        # Accept the pair only when within tolerance.
                        try:
                            if max(diff) < self.epsilon:
                                tau = self.calc_active_tolerance_inc(self.epsilon, triarc, Arc0, Arc1)
                                del NewCurve[anz - 3:anz]
                                NewCurve.append(Arc0)
                                NewCurve.append(Arc1)
                        except: # TODO remove except
                            pass
                    # Decreasing spiral (radii shrink, orientation consistent).
                    elif NewCurve[-3].r > NewCurve[-2].r > NewCurve[-1].r\
                            and NewCurve[-3].ext * NewCurve[-2].ext >= 0.0\
                            and NewCurve[-2].ext * NewCurve[-1].ext >= 0.0:
                        anz = len(NewCurve)
                        triarc = NewCurve[anz - 3:anz]
                        Arc0, Arc1 = self.fit_triac_by_dec_biarc(triarc, tau)
                        diff = self.check_diff_to_pts(Pts, Arc1, Arc0)
                        try:
                            if max(diff) < self.epsilon:
                                tau = self.calc_active_tolerance_dec(self.epsilon, triarc, Arc0, Arc1)
                                del NewCurve[anz - 3:anz]
                                NewCurve.append(Arc0)
                                NewCurve.append(Arc1)
                        except: # TODO remove except
                            pass
                else:
                    # A non-arc geometry interrupts the mergeable run.
                    Pts = []
        return NewCurve
    def calc_active_tolerance_inc(self, tau, arc, Arc0, Arc1):
        """Compute the remaining tolerance budget after fitting an
        increasing arc triple; used as allowance for the next merge."""
        V0 = (arc[0].O - arc[0].Ps).unit_vector()
        Vb = (Arc1.O - Arc1.Ps).unit_vector()
        t_ = (2 * arc[0].r * tau + pow(tau, 2)) / \
             (2 * (arc[0].r + (arc[0].r + tau) * V0 * Vb))
        te = arc[0].r + t_ - (Arc0.Pe - (arc[0].O + (t_ * V0))).length()
        tm = arc[1].O.distance(Arc0.Pe) - abs(arc[1].r)
        if tm < 0.0:
            tf = tau
        else:
            tf = tau - tm
        # print("tm: %0.3f; te: %0.3f; tau: %0.3f" %(tm,te,tau))
        # Remaining tolerance is the smallest candidate, clamped at zero.
        epsilon = min([te, tf, tau])
        if epsilon < 0.0:
            epsilon = 0.0
        return epsilon
    def calc_active_tolerance_dec(self, tau, arc, Arc0, Arc1):
        """Compute the remaining tolerance budget after fitting a
        decreasing arc triple."""
        # TODO why does it differ with calc_active_tolerance_inc
        # V0 = (arc[2].O - arc[2].Ps).unit_vector()
        # Vb = (Arc1.O - Arc1.Ps).unit_vector()
        # t_ = (2 * arc[2].r * tau + pow(tau, 2)) / \
        #     (2 * (arc[2].r + (arc[2].r + tau) * V0 * Vb))
        # te = arc[2].r + t_ - (Arc0.Pe - (arc[2].O + (t_ * V0))).length()
        te = tau
        tm = -arc[1].O.distance(Arc0.Pe) + abs(arc[1].r)
        if tm < 0.0:
            tf = tau
        else:
            tf = tau - tm
        # print("tm: %0.3f; tf: %0.3f; te: %0.3f; tau: %0.3f" %(tm,tf,te,tau))
        # Remaining tolerance is the smallest candidate, clamped at zero.
        epsilon = min([te, tf, tau])
        if epsilon < 0.0:
            epsilon = 0.0
        return epsilon
    def fit_triac_by_inc_biarc(self, arc, eps):
        """Fit two replacement arcs (a biarc) through a triple of arcs
        with increasing radii, allowing a deviation of at most ``eps``.

        Returns the two new ArcGeo instances joined at point Pn.
        """
        # Unit vectors from the outer arcs' start/end points to their centers.
        V0 = (arc[0].O - arc[0].Ps).unit_vector()
        V2 = (arc[2].O - arc[2].Pe).unit_vector()
        # Helper quantities.
        t0 = (arc[2].r - arc[0].r)
        D = (arc[2].O - arc[0].O)
        X0 = (t0 * t0) - (D * D)
        X1 = 2 * (D * V0 - t0)
        Y0 = 2 * (t0 - D * V2)
        Y1 = 2 * (V0 * V2 - 1)
        # Compute tb.
        tb = (pow((arc[1].r - arc[0].r + eps), 2) - ((arc[1].O - arc[0].O) * (arc[1].O - arc[0].O))) / \
             (2 * (arc[1].r - arc[0].r + eps + (arc[1].O - arc[0].O) * V0))
        # Compute tc.
        tc = (pow(t0, 2) - (D * D)) / (2 * (t0 - D * V0))
        # Choose t.
        t = min([tb, tc])
        # Compute u.
        u = (X0 + X1 * t) / (Y0 + Y1 * t)
        # Build the two new arcs and their joint point Pn.
        Oa = arc[0].O + t * V0
        ra = arc[0].r + t
        Ob = arc[2].O - u * V2
        rb = arc[2].r - u
        Vn = (Oa - Ob).unit_vector()
        Pn = Oa + ra * Vn
        Arc0 = ArcGeo(Ps=arc[0].Ps, Pe=Pn, O=Oa, r=ra, direction=arc[0].ext)
        Arc1 = ArcGeo(Ps=Pn, Pe=arc[2].Pe, O=Ob, r=rb, direction=arc[2].ext)
        return Arc0, Arc1
    def fit_triac_by_dec_biarc(self, arc, eps):
        """Fit two replacement arcs (a biarc) through a triple of arcs
        with decreasing radii, allowing a deviation of at most ``eps``.

        Mirror image of fit_triac_by_inc_biarc, fitted from the far end.
        """
        V0 = (arc[2].O - arc[2].Pe).unit_vector()
        V2 = (arc[0].O - arc[0].Ps).unit_vector()
        # Helper quantities.
        t0 = (arc[0].r - arc[2].r)
        D = (arc[0].O - arc[2].O)
        X0 = (t0 * t0) - (D * D)
        X1 = 2 * (D * V0 - t0)
        Y0 = 2 * (t0 - D * V2)
        Y1 = 2 * (V0 * V2 - 1)
        # Compute tb.
        tb = (pow((arc[1].r - arc[2].r + eps), 2) - ((arc[1].O - arc[2].O) * (arc[1].O - arc[2].O))) / \
             (2 * (arc[1].r - arc[2].r + eps + (arc[1].O - arc[2].O) * V0))
        # Compute tc.
        tc = (pow(t0, 2) - (D * D)) / (2 * (t0 - D * V0))
        # Choose t.
        t = min([tb, tc])
        # Compute u.
        u = (X0 + X1 * t) / (Y0 + Y1 * t)
        # Build the two new arcs and their joint point Pn.
        Oa = arc[0].O - u * V2
        ra = arc[0].r - u
        Ob = arc[2].O + t * V0
        rb = arc[2].r + t
        Vn = (Ob - Oa).unit_vector()
        Pn = Ob + rb * Vn
        Arc0 = ArcGeo(Ps=arc[0].Ps, Pe=Pn, O=Oa, r=ra, \
                      s_ang=Oa.norm_angle(arc[0].Ps), e_ang=Oa.norm_angle(Pn),
                      direction=arc[0].ext)
        Arc1 = ArcGeo(Ps=Pn, Pe=arc[2].Pe, O=Ob, r=rb, \
                      s_ang=Ob.norm_angle(Pn), e_ang=Ob.norm_angle(arc[2].Pe),
                      direction=arc[2].ext)
        return Arc0, Arc1
    def check_diff_to_pts(self, Pts, Arc0, Arc1):
        """Return the radial deviation of each point in ``Pts`` from the
        biarc (Arc0, Arc1).

        Points covered by neither arc's angular span are removed from
        ``Pts`` in place.
        """
        diff = []
        for Pt in Pts:
            w0 = Arc0.O.norm_angle(Pt)
            w1 = Arc1.O.norm_angle(Pt)
            if (w0 >= min([Arc0.s_ang, Arc0.e_ang]))and\
               (w0 <= max([Arc0.s_ang, Arc0.e_ang])):
                diff.append(abs(Arc0.O.distance(Pt) - abs(Arc0.r)))
            elif (w1 >= min([Arc1.s_ang, Arc1.e_ang]))and\
                 (w1 <= max([Arc1.s_ang, Arc1.e_ang])):
                diff.append(abs(Arc1.O.distance(Pt) - abs(Arc1.r)))
            else:
                # NOTE(review): deletes from Pts while iterating over it;
                # behavior preserved as-is.
                del Pts[Pts.index(Pt)]
        return diff
def compress_lines(self, Curve):
"""
compress_lines()
"""
NewCurve = []
Pts = []
for geo in Curve:
NewCurve.append(geo)
anz = len(NewCurve)
if anz >= 2:
# Wenn Geo eine Linie ist anh�ngen und �berpr�fen
if isinstance(NewCurve[-2], LineGeo) and isinstance(NewCurve[-1], LineGeo):
Pts.append(geo.Pe)
JointLine = LineGeo(NewCurve[-2].Ps, NewCurve[-1].Pe)
# �berpr�fung der Abweichung
res = []
for Point in Pts:
res.append(JointLine.distance_l_p(Point))
# print res
# Wenn die Abweichung OK ist Vorheriges anh�ngen
if max(res) < self.epsilon:
anz = len(NewCurve)
del NewCurve[anz - 2:anz]
NewCurve.append(JointLine)
points = [geo.Pe]
# Wenn nicht nicht anh�ngen und Pts zur�cksetzen
else:
Pts = [geo.Pe]
# Wenn es eines eine andere Geometrie als eine Linie ist
else:
Pts = []
return NewCurve
    def calc_high_accurancy_BiarcCurve(self):
        """Fit biarcs to the NURBS with high accuracy.

        Returns a tuple (BiarcCurves, PtsVecs): one biarc list and one
        sampled point/tangent list per continuous u-section.
        """
        # Determine the separate u-sections that have to be fitted.
        u_sections = self.calc_u_sections(self.NURBS.Knots,
                                          self.NURBS.ignor,
                                          self.NURBS.knt_m_change[:])
        # Original note: the step must be odd, otherwise there is a
        # rounding problem around 1.
        self.max_step = float(self.NURBS.Knots[-1] / (float(self.segments)))
        # Fit the biarc chain of every section.
        BiarcCurves = []
        PtsVecs = []
        for u_sect in u_sections:
            if debug_on:
                logger.debug("Calculation Biarc Section: %s" % u_sect)
            BiarcCurve, PtsVec = self.calc_Biarc_section(u_sect, self.epsilon, self.epsilon_high)
            BiarcCurves.append(BiarcCurve)
            PtsVecs.append(PtsVec)
        return BiarcCurves, PtsVecs
def calc_u_sections(self, Knots, ignor, unsteady):
"""
calc_u_sections()
"""
# Initialisieren
u_sections = []
# Abfrage ob bereits der Anfang ignoriert wird
# u_beg = Knots[0]
u_end = Knots[0]
ig_nr = 0
# Schleife bis u_end==Knots[0]
while u_end < Knots[-1]:
u_beg = u_end
# Wenn Ignor == Start dann Start = Ende von Ignor
if len(ignor) > ig_nr:
if u_beg == ignor[ig_nr][0]:
u_beg = ignor[ig_nr][1]
ig_nr += 1
# L�schen der unsteadys bis gr��er als u_beg
while (len(unsteady) > 0)and(unsteady[0] <= u_beg):
del(unsteady[0])
# Wenn Ignor noch mehr beiinhaltet dann Ignor Anfang = Ende
if len(ignor) > ig_nr:
u_end = ignor[ig_nr][0]
else:
u_end = Knots[-1]
if len(unsteady) > 0 and unsteady[0] < u_end:
u_end = unsteady[0]
del(unsteady[0])
# Solange u_beg nicht das Ende ist anh�ngen
if u_beg != u_end:
u_sections.append([u_beg, u_end])
return u_sections
    def calc_Biarc_section(self, u_sect, nom_tol, max_tol):
        """Fit a chain of biarcs to one continuous u-section.

        Advances along the parameter range with an adaptive step: the step
        grows while the biarc fit stays within ``max_tol`` and shrinks
        (factor 0.7) when it does not.

        Returns (BiarcCurve, PtsVec): the fitted biarcs and the sampled
        point/tangent pairs.

        Raises:
            ValueError: after 10000 iterations without finishing.
        """
        min_u = 1e-12
        BiarcCurve = []
        cur_step = self.max_step
        u = u_sect[0] + min_u
        PtsVec = [self.NURBS.NURBS_evaluate(n=1, u=u)]
        step = 0
        # Iterate until all biarcs of the section are computed.
        while u < u_sect[-1] - min_u:
            step += 1
            # logger.debug(step)
            u += cur_step
            # Clamp u to the end of the section.
            if u > u_sect[-1]:
                cur_step = u_sect[-1] - (u - cur_step) - min_u
                u = u_sect[-1] - min_u
            PtVec = self.NURBS.NURBS_evaluate(n=1, u=u)
            # Compute the next biarc from the last two sampled points.
            Biarc = (BiarcClass(PtsVec[-1][0], PtsVec[-1][1], PtVec[0], PtVec[1], nom_tol * 0.5))
            if Biarc.shape == "Zero":
                # Degenerate step: widen the step and retry.
                cur_step = min([cur_step * 2, self.max_step])
            elif Biarc.shape == "LineGeo":
                # Straight segment: accept and widen the step.
                BiarcCurve.append(Biarc)
                cur_step = min([cur_step * 2, self.max_step])
                PtsVec.append(PtVec)
            else:
                if self.check_biarc_fitting_tolerance(Biarc, max_tol, u - cur_step, u):
                    PtsVec.append(PtVec)
                    BiarcCurve.append(Biarc)
                    cur_step = min([cur_step / 0.7, self.max_step])
                else:
                    # Fit too coarse: step back and refine.
                    u -= cur_step
                    cur_step *= 0.7
            if step > 10000:
                raise ValueError("Iterations above 10000 reduce tolerance")
        return BiarcCurve, PtsVec
def check_biarc_fitting_tolerance(self, Biarc, epsilon, u0, u1):
"""
check_biarc_fitting_tolerance()
"""
check_step = (u1 - u0) / 5
check_u = []
check_Pts = []
fit_error = []
for i in range(1, 5):
check_u.append(u0 + check_step * i)
check_Pts.append(self.NURBS.NURBS_evaluate(n=0, u=check_u[-1]))
fit_error.append(Biarc.get_biarc_fitting_error(check_Pts[-1]))
# if debug_on:
if 0:
logger.debug('u0: %s' % u0)
logger.debug('u1: %s' % u1)
logger.debug('Biarc: %s' % Biarc)
logger.debug(check_Pts)
logger.debug('check_Pts: %s %s %s %s' % (check_Pts[0], check_Pts[1], check_Pts[2], check_Pts[3]))
logger.debug('fit_error: %s' % fit_error)
if max(fit_error) >= epsilon:
return 0
else:
return 1
class NURBSClass:
    """
    Non-Uniform Rational B-Spline (NURBS) curve.

    Stores 2D control points with weights over a knot vector; evaluation is
    delegated to a BSplineClass instance which operates on the homogeneous
    (weighted) control points.
    """
    def __init__(self, degree=0, Knots=None, Weights=None, CPoints=None):
        """
        Initialize the NURBS and build the underlying B-Spline.

        @param degree: Degree of the spline.
        @param Knots: Knot vector of the spline (default: empty list).
        @param Weights: Weights of the individual control points.
        @param CPoints: Control points of the spline [2D].
        """
        self.degree = degree  # Spline degree
        # Avoid the mutable-default-argument pitfall for the knot vector
        self.Knots = [] if Knots is None else Knots  # Knot vector
        self.CPoints = CPoints  # Control points of the spline [2D]
        self.Weights = Weights  # Weights of the individual points
        # Initialization of the derived values
        self.HCPts = []  # Homogeneous point vectors [3D]
        # Convert the control points into homogeneous points
        self.CPts_2_HCPts()
        # Create the B-Spline class for the calculation of the homogeneous points
        self.BSpline = BSplineClass(degree=self.degree,
                                    Knots=self.Knots,
                                    CPts=self.HCPts)

    def __str__(self):
        """
        Standard method to print the object.
        @return: A string representation of the NURBS parameters.
        """
        return "\ndegree: %s" % self.degree +\
               "\nKnots: %s" % self.Knots +\
               "\nCPoints: %s" % self.CPoints +\
               "\nWeights: %s" % self.Weights +\
               "\nHCPts: %s" % self.HCPts

    def check_NURBSParameters(self, tol=1e-6, check=1):
        """
        Validate the knot vector and/or the control points.

        check == 1 or 3: group repeated knot values; more than degree+1
        equal knots raises an error, and interior repetitions of at least
        degree knots are probed for a tangent discontinuity, which is
        recorded in self.knt_m_change.
        check == 2 or 3: group runs of (nearly) identical control points;
        runs longer than degree+1 yield parameter intervals in self.ignor
        that shall be skipped during fitting.

        @param tol: Positional tolerance for treating control points as equal.
        @param check: 1 = check knots only, 2 = control points only, 3 = both.
        @raise ValueError: If more than degree+1 identical knots are found.
        """
        # Check the knot vector:
        # search for repeated knot values (count above degree+1 => error)
        knt_nr = 1
        knt_vec = [[self.Knots[0]]]
        self.knt_m_change = []
        self.ignor = []
        if check == 1 or check == 3:
            # Group consecutive equal knot values
            while knt_nr < len(self.Knots):
                if self.Knots[knt_nr] == knt_vec[-1][-1]:
                    knt_vec[-1].append(self.Knots[knt_nr])
                else:
                    knt_vec.append([self.Knots[knt_nr]])
                knt_nr += 1
            logger.debug("Checking Knots: %s" % knt_vec)
            for knt_spts in knt_vec:
                if len(knt_spts) > self.degree + 1:
                    raise ValueError("Same Knots Nr. bigger then degree+1")
                # For interior multiple knots compare the tangent slope just
                # before and at the knot to detect a direction change
                elif len(knt_spts) >= self.degree and\
                        knt_vec[0][0] < knt_spts[-1] < knt_vec[-1][-1]:
                    temp, tangent0 = self.NURBS_evaluate(n=1, u=knt_spts[0] - 1e-12)
                    temp, tangent1 = self.NURBS_evaluate(n=1, u=knt_spts[0])
                    if abs(tangent0 - tangent1) > 1e-6:
                        self.knt_m_change.append(knt_spts[0])
            logger.debug("Knots with change of direction: %s" % self.knt_m_change)
        # Check the control points:
        # search for runs of identical control points (above degree+2 => do not calculate)
        if check == 2 or check == 3:
            ctlpt_nr = 0
            ctlpt_vec = [[ctlpt_nr]]
            # Group consecutive control points lying within tol of each other
            while ctlpt_nr < len(self.CPoints) - 1:
                ctlpt_nr += 1
                if self.CPoints[ctlpt_nr].within_tol(self.CPoints[ctlpt_vec[-1][-1]], tol):
                    ctlpt_vec[-1].append(ctlpt_nr)
                else:
                    ctlpt_vec.append([ctlpt_nr])
            for same_ctlpt in ctlpt_vec:
                if len(same_ctlpt) > self.degree + 1:
                    # Skip the parameter interval spanned by the run
                    self.ignor.append([self.Knots[same_ctlpt[0] + self.degree // 2],
                                       self.Knots[same_ctlpt[-1] + self.degree // 2]])
        for ignor in self.ignor:
            logger.debug("Ignoring u's between u: %s and u: %s" % (ignor[0], ignor[1]))
        if len(self.knt_m_change):
            logger.debug("Non steady Angles between Knots: %s" % self.knt_m_change)

    def calc_curve(self, n=0, cpts_nr=20):
        """
        Calculate a number of evenly distributed points of the curve (and,
        for n >= 1, the tangent angle at each of them).

        @param n: Derivative order to evaluate (n >= 1 also returns angles).
        @param cpts_nr: Number of points to calculate.
        @return: Points for n == 0, otherwise (Points, tang).
        """
        # Initial values for step and u
        u = 0
        Points = []
        tang = []
        step = self.Knots[-1] / (cpts_nr - 1)
        # BUGFIX: iterate up to the last knot value instead of the formerly
        # hard-coded 1.0, which only worked for knot vectors normalized to
        # [0, 1]; this matches BSplineClass.calc_curve.
        while u <= self.Knots[-1]:
            # NOTE(review): for n == 0 NURBS_evaluate returns a single
            # Point, so this unpacking assumes n >= 1 (or an iterable
            # Point class) — confirm against the callers.
            Pt, tangent = self.NURBS_evaluate(n=n, u=u)
            Points.append(Pt)
            # For the first derivative the angle of the tangent is stored
            if n >= 1:
                tang.append(tangent)
            u += step
        if n >= 1:
            return Points, tang
        else:
            return Points

    def NURBS_evaluate(self, n=0, u=0):
        """
        Calculate a point of the NURBS and, for n > 0, its first derivative.

        @param n: Derivative order (n > 0 also returns the tangent angle).
        @param u: Parameter value at which to evaluate.
        @return: Point for n == 0, otherwise (Point, tangent angle in rad).
        """
        # Calculate the homogeneous points up to the n-th derivative
        HPt = self.BSpline.bspline_ders_evaluate(n=n, u=u)
        # Transform the point back into normal coordinates
        # (local renamed from "Point" to avoid shadowing the Point class)
        pt = self.HPt_2_Pt(HPt[0])
        if n > 0:
            # First derivative as direction vector (quotient rule):
            #      w(u)*A'(u)-w'(u)*A(u)
            # dPt=---------------------
            #            w(u)^2
            dPt = []
            for j in range(len(HPt[0]) - 1):
                dPt.append((HPt[0][-1] * HPt[1][j] - HPt[1][-1] * HPt[0][j]) /
                           pow(HPt[0][-1], 2))
            # Angle of the direction vector
            tangent = atan2(dPt[1], dPt[0])
            return pt, tangent
        else:
            return pt

    def CPts_2_HCPts(self):
        """
        Convert the NURBS control points and weights into homogeneous
        vectors [x*w, y*w, w] stored in self.HCPts.
        """
        for P_nr in range(len(self.CPoints)):
            HCPtVec = [self.CPoints[P_nr].x * self.Weights[P_nr],
                       self.CPoints[P_nr].y * self.Weights[P_nr],
                       self.Weights[P_nr]]
            self.HCPts.append(HCPtVec[:])

    def HPt_2_Pt(self, HPt):
        """
        Convert a homogeneous point vector back into a 2D Point.

        @param HPt: Homogeneous vector [x*w, y*w, w].
        @return: The de-homogenized Point.
        """
        return Point(x=HPt[0] / HPt[-1], y=HPt[1] / HPt[-1])
class BSplineClass:
    """
    B-Spline curve over (homogeneous) control points, with evaluation of
    the curve and its derivatives using the algorithms from
    "THE NURBS BOOK" (Piegl & Tiller).
    """
    def __init__(self, degree=0, Knots=None, CPts=None):
        """
        Initialize the B-Spline and validate the input sizes.

        @param degree: Degree of the spline.
        @param Knots: Knot vector (default: empty list).
        @param CPts: Control points, each a list of coordinates.
        @raise ValueError: If the knot/control point/degree counts mismatch.
        """
        self.degree = degree
        # Avoid the mutable-default-argument pitfall
        self.Knots = [] if Knots is None else Knots
        self.CPts = [] if CPts is None else CPts
        self.Knots_len = len(self.Knots)
        self.CPt_len = len(self.CPts[0])   # Coordinates per control point
        self.CPts_len = len(self.CPts)     # Number of control points
        # Input check: knot count etc. has to match
        if self.Knots_len < self.degree + 1:
            raise ValueError("degree greater than number of control points.")
        if self.Knots_len != (self.CPts_len + self.degree + 1):
            logger.error("shall be: %s" % (self.CPts_len + self.degree + 1))
            logger.error("is: %s" % self.Knots_len)
            raise ValueError("Knot/Control Point/degree number error.")

    def calc_curve(self, n=0, cpts_nr=20):
        """
        Calculate a number of evenly distributed points up to the n-th
        derivative.

        @param n: Highest derivative order to evaluate.
        @param cpts_nr: Number of points to calculate.
        @return: Tuple (Points, tang); tang holds the tangent angles in rad
        (empty unless n >= 1).
        """
        # Initial values for step and u
        u = 0
        step = float(self.Knots[-1]) / (cpts_nr - 1)
        Points = []
        # If the first (or a higher) derivative is calculated, the first
        # derivative is stored in tang as an angle in rad
        tang = []
        while u <= self.Knots[-1]:
            CK = self.bspline_ders_evaluate(n=n, u=u)
            # Store the point in the point list
            Points.append(Point(x=CK[0][0], y=CK[0][1]))
            # For the first derivative the angle of the tangent is calculated
            if n >= 1:
                tang.append(atan2(CK[1][1], CK[1][0]))
            u += step
        return Points, tang

    def bspline_ders_evaluate(self, n=0, u=0):
        """
        Evaluate the B-Spline and its derivatives at parameter u.
        Modified Version of Algorithm A3.2 from "THE NURBS BOOK" pg.93

        @param n: Highest derivative order to compute.
        @param u: Parameter value.
        @return: CK, where CK[k] is the k-th derivative (CK[0] the point
        itself), each a list of CPt_len coordinates.
        """
        # Position of u within the knot vector
        span = self.findspan(u)
        # Basis functions and derivatives up to order n at u
        dN = self.ders_basis_functions(span, u, n)
        p = self.degree
        du = min(n, p)
        # Result matrix (n+1) x CPt_len, zero-initialized
        CK = [[0.0] * self.CPt_len for _ in range(n + 1)]
        for k in range(du + 1):
            for j in range(p + 1):
                for i in range(self.CPt_len):
                    CK[k][i] += dN[k][j] * self.CPts[span - p + j][i]
        return CK

    def findspan(self, u):
        """
        Find the knot span index containing u.
        Algorithm A2.1 from "THE NURBS BOOK" pg.68

        @param u: Parameter value.
        @return: Index mid with Knots[mid] <= u < Knots[mid + 1].
        @raise ValueError: If the binary search does not converge.
        """
        # Special case when the value equals the last knot
        if u == self.Knots[-1]:
            return self.Knots_len - self.degree - 2
        # Binary search: the interval from low to high is halved until
        # u lies within the interval Knots[mid:mid+1]
        # NOTE(review): the textbook initializes low = degree; degree-1
        # widens the start interval but the search still converges — confirm.
        low = self.degree - 1
        high = self.Knots_len
        mid = (low + high) // 2
        counter = 1
        while u < self.Knots[mid] or u >= self.Knots[mid + 1]:
            counter += 1
            if u < self.Knots[mid]:
                high = mid
            else:
                low = mid
            mid = (low + high) // 2
            if debug_on:
                logger.debug("high: %s; low: %s; mid: %s" % (high, low, mid))
                logger.debug("u: %s; self.Knots[mid]: %s; self.Knots[mid+1]: %s" %
                             (u, self.Knots[mid], self.Knots[mid + 1]))
            if counter > 100:
                raise ValueError("Iterations above 100 cannot find span")
        return mid

    def ders_basis_functions(self, span, u, n):
        """
        Calculate the nonzero basis functions and their derivatives at u.
        Algorithm A2.3 from "THE NURBS BOOK" pg.72

        @param span: Knot span index containing u (see findspan).
        @param u: Parameter value.
        @param n: Highest derivative order.
        @return: ders, where ders[k][j] is the k-th derivative of the j-th
        nonzero basis function.
        """
        d = self.degree
        # Zero-initialized work matrices:
        # a: two alternating rows for the derivative coefficients
        a = [[0.0] * (d + 1) for _ in range(2)]
        # ndu: basis functions (upper triangle) and knot differences (lower)
        ndu = [[0.0] * (d + 1) for _ in range(d + 1)]
        # ders: result matrix
        ders = [[0.0] * (d + 1) for _ in range(n + 1)]
        ndu[0][0] = 1.0
        left = [0]
        right = [0]
        for j in range(1, d + 1):
            left.append(u - self.Knots[span + 1 - j])
            right.append(self.Knots[span + j] - u)
            saved = 0.0
            for r in range(j):
                # Lower triangle
                ndu[j][r] = right[r + 1] + left[j - r]
                temp = ndu[r][j - 1] / ndu[j][r]
                # Upper triangle
                ndu[r][j] = saved + right[r + 1] * temp
                saved = left[j - r] * temp
            ndu[j][j] = saved
        # Load the basis functions
        for j in range(d + 1):
            ders[0][j] = ndu[j][d]
        # This section computes the derivatives (Eq. [2.9])
        for r in range(d + 1):  # Loop over function index
            s1 = 0
            s2 = 1  # Alternate rows in array a
            a[0][0] = 1.0
            for k in range(1, n + 1):
                der = 0.0
                rk = r - k
                pk = d - k
                # Left term, only present if r-k >= 0
                if r >= k:
                    a[s2][0] = a[s1][0] / ndu[pk + 1][rk]
                    der = a[s2][0] * ndu[rk][pk]
                if rk >= -1:
                    j1 = 1
                else:
                    j1 = -rk
                if r - 1 <= pk:
                    j2 = k - 1
                else:
                    j2 = d - r
                # Middle terms (not entered for the first derivative)
                for j in range(j1, j2 + 1):
                    a[s2][j] = (a[s1][j] - a[s1][j - 1]) / ndu[pk + 1][rk + j]
                    der += a[s2][j] * ndu[rk + j][pk]
                # Right term: 1 / (u(i+p+1) - u(i+1)) times N(i+1)(p-1)
                if r <= pk:
                    a[s2][k] = -a[s1][k - 1] / ndu[pk + 1][r]
                    der += a[s2][k] * ndu[r][pk]
                ders[k][r] = der
                j = s1; s1 = s2; s2 = j  # Switch rows
        # Multiply through by the correct factors
        r = d
        for k in range(1, n + 1):
            for j in range(d + 1):
                ders[k][j] *= r
            r *= (d - k)
        return ders
| hehongyu1995/Dxf2GCode | dxfimport/spline_convert.py | Python | gpl-3.0 | 30,731 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.