| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
#!/usr/bin/env python
# Small wrapper script for weeder2, which needs the FreqFiles directory
# where it is executed. This script allows running weeder2 from anywhere.
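# Usage sketch (paths illustrative): call this wrapper exactly as you would
# call weeder2 itself. Only -f is parsed here; it is resolved to an absolute
# path and every other argument is passed through unchanged, e.g.:
#   weeder2.py -f /data/promoters.fa <other weeder2 options...>
# The wrapper then runs weeder2 with cwd set to its install directory so the
# bundled FreqFiles data is found.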
import os
import sys
import argparse
import subprocess as sp
# Weeder install dir
weeder_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "share", "weeder2"))
weeder_exe = "weeder2"
weeder_help = sp.check_output(
os.path.join(weeder_dir, weeder_exe),
stderr=sp.STDOUT).decode()
parser = argparse.ArgumentParser()
parser.add_argument("-f", dest="fname")
args, unknownargs = parser.parse_known_args()
if not args.fname:
print(weeder_help)
sys.exit()
fname = os.path.abspath(args.fname)
rest = " ".join(unknownargs)
cmd = "./{} -f {} {}".format(weeder_exe, fname, rest)
sys.exit(sp.call(cmd, shell=True, cwd=weeder_dir))
repo_name: roryk/recipes | path: recipes/weeder/weeder2.py | language: Python | license: mit | size: 833 | score: 0.0012
# Copyright 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import socket
import time
from xml.etree import ElementTree
from oslo_log import log as oslo_logging
import six
import untangle
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit import constant
from cloudbaseinit import exception
from cloudbaseinit.metadata.services import base
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.utils import dhcp
from cloudbaseinit.utils import encoding
from cloudbaseinit.utils.windows import x509
CONF = cloudbaseinit_conf.CONF
LOG = oslo_logging.getLogger(__name__)
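# DHCP option 245 is the Azure-specific option that carries the WireServer
# IPv4 address as raw packed bytes (decoded with socket.inet_ntoa below).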
WIRESERVER_DHCP_OPTION = 245
WIRE_SERVER_VERSION = '2015-04-05'
GOAL_STATE_STARTED = "Started"
HEALTH_STATE_READY = "Ready"
HEALTH_STATE_NOT_READY = "NotReady"
HEALTH_SUBSTATE_PROVISIONING = "Provisioning"
HEALTH_SUBSTATE_PROVISIONING_FAILED = "ProvisioningFailed"
ROLE_PROPERTY_CERT_THUMB = "CertificateThumbprint"
OVF_ENV_DRIVE_TAG = "E6DA6616-8EC4-48E0-BE93-58CE6ACE3CFB.tag"
OVF_ENV_FILENAME = "ovf-env.xml"
CUSTOM_DATA_FILENAME = "CustomData.bin"
DATALOSS_WARNING_PATH = '$$\\OEM\\DATALOSS_WARNING_README.txt'
DEFAULT_KMS_HOST = "kms.core.windows.net"
class AzureService(base.BaseHTTPMetadataService):
def __init__(self):
super(AzureService, self).__init__(base_url=None)
self._enable_retry = True
self._goal_state = None
self._config_set_drive_path = None
self._ovf_env = None
self._headers = {"x-ms-guest-agent-name": "cloudbase-init"}
self._osutils = osutils_factory.get_os_utils()
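# Poll the local DHCP options for the WireServer endpoint, retrying every
# poll_time seconds for up to total_time seconds before giving up.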
def _get_wire_server_endpoint_address(self):
total_time = 300
poll_time = 5
retries = total_time / poll_time
while True:
try:
options = dhcp.get_dhcp_options()
endpoint = (options or {}).get(WIRESERVER_DHCP_OPTION)
if not endpoint:
raise exception.MetadaNotFoundException(
"Cannot find Azure WireServer endpoint address")
return socket.inet_ntoa(endpoint)
except Exception:
if not retries:
raise
time.sleep(poll_time)
retries -= 1
def _check_version_header(self):
if "x-ms-version" not in self._headers:
versions = self._get_versions()
if WIRE_SERVER_VERSION not in versions.Versions.Supported.Version:
raise exception.MetadaNotFoundException(
"Unsupported Azure WireServer version: %s" %
WIRE_SERVER_VERSION)
self._headers["x-ms-version"] = WIRE_SERVER_VERSION
def _get_versions(self):
return self._wire_server_request("?comp=Versions")
def _wire_server_request(self, path, data_xml=None, headers=None,
parse_xml=True):
if not self._base_url:
raise exception.CloudbaseInitException(
"Azure WireServer base url not set")
all_headers = self._headers.copy()
if data_xml:
all_headers["Content-Type"] = "text/xml; charset=utf-8"
if headers:
all_headers.update(headers)
data = self._exec_with_retry(
lambda: super(AzureService, self)._http_request(
path, data_xml, headers=all_headers))
if parse_xml:
return untangle.parse(six.StringIO(encoding.get_as_string(data)))
else:
return data
@staticmethod
def _encode_xml(xml_root):
bio = six.BytesIO()
ElementTree.ElementTree(xml_root).write(
bio, encoding='utf-8', xml_declaration=True)
return bio.getvalue()
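# Shape of the health report XML built by _get_health_report_xml below
# (values illustrative; real values come from the current goal state):
#   <Health>
#     <GoalStateIncarnation>1</GoalStateIncarnation>
#     <Container>
#       <ContainerId>container-id</ContainerId>
#       <RoleInstanceList>
#         <Role>
#           <InstanceId>role-instance-id</InstanceId>
#           <Health>
#             <State>NotReady</State>
#             <Details>
#               <SubStatus>Provisioning</SubStatus>
#               <Description>...</Description>
#             </Details>
#           </Health>
#         </Role>
#       </RoleInstanceList>
#     </Container>
#   </Health>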
def _get_health_report_xml(self, state, sub_status=None, description=None):
xml_root = ElementTree.Element('Health')
xml_goal_state_incarnation = ElementTree.SubElement(
xml_root, 'GoalStateIncarnation')
xml_goal_state_incarnation.text = str(self._get_incarnation())
xml_container = ElementTree.SubElement(xml_root, 'Container')
xml_container_id = ElementTree.SubElement(xml_container, 'ContainerId')
xml_container_id.text = self._get_container_id()
xml_role_instance_list = ElementTree.SubElement(
xml_container, 'RoleInstanceList')
xml_role = ElementTree.SubElement(xml_role_instance_list, 'Role')
xml_role_instance_id = ElementTree.SubElement(xml_role, 'InstanceId')
xml_role_instance_id.text = self._get_role_instance_id()
xml_health = ElementTree.SubElement(xml_role, 'Health')
xml_state = ElementTree.SubElement(xml_health, 'State')
xml_state.text = state
if sub_status:
xml_details = ElementTree.SubElement(xml_health, 'Details')
xml_sub_status = ElementTree.SubElement(xml_details, 'SubStatus')
xml_sub_status.text = sub_status
xml_description = ElementTree.SubElement(
xml_details, 'Description')
xml_description.text = description
return self._encode_xml(xml_root)
def _get_role_properties_xml(self, properties):
xml_root = ElementTree.Element('RoleProperties')
xml_container = ElementTree.SubElement(xml_root, 'Container')
xml_container_id = ElementTree.SubElement(xml_container, 'ContainerId')
xml_container_id.text = self._get_container_id()
xml_role_instances = ElementTree.SubElement(
xml_container, 'RoleInstances')
xml_role_instance = ElementTree.SubElement(
xml_role_instances, 'RoleInstance')
xml_role_instance_id = ElementTree.SubElement(
xml_role_instance, 'Id')
xml_role_instance_id.text = self._get_role_instance_id()
xml_role_properties = ElementTree.SubElement(
xml_role_instance, 'Properties')
for name, value in properties.items():
ElementTree.SubElement(
xml_role_properties, 'Property', name=name, value=value)
return self._encode_xml(xml_root)
def _get_goal_state(self, force_update=False):
if not self._goal_state or force_update:
self._goal_state = self._wire_server_request(
"machine?comp=goalstate").GoalState
expected_state = self._goal_state.Machine.ExpectedState
if expected_state != GOAL_STATE_STARTED:
raise exception.CloudbaseInitException(
"Invalid machine expected state: %s" % expected_state)
return self._goal_state
def _get_incarnation(self):
goal_state = self._get_goal_state()
return goal_state.Incarnation.cdata
def _get_container_id(self):
goal_state = self._get_goal_state()
return goal_state.Container.ContainerId.cdata
def _get_role_instance_config(self):
goal_state = self._get_goal_state()
role_instance = goal_state.Container.RoleInstanceList.RoleInstance
return role_instance.Configuration
def _get_role_instance_id(self):
goal_state = self._get_goal_state()
role_instance = goal_state.Container.RoleInstanceList.RoleInstance
return role_instance.InstanceId.cdata
def _post_health_status(self, state, sub_status=None, description=None):
health_report_xml = self._get_health_report_xml(
state, sub_status, description)
LOG.debug("Health data: %s", health_report_xml)
self._wire_server_request(
"machine?comp=health", health_report_xml, parse_xml=False)
def provisioning_started(self):
self._post_health_status(
HEALTH_STATE_NOT_READY, HEALTH_SUBSTATE_PROVISIONING,
"Cloudbase-Init is preparing your computer for first use...")
def provisioning_completed(self):
self._post_health_status(HEALTH_STATE_READY)
def provisioning_failed(self):
self._post_health_status(
HEALTH_STATE_NOT_READY, HEALTH_SUBSTATE_PROVISIONING_FAILED,
"Provisioning failed")
def _post_role_properties(self, properties):
role_properties_xml = self._get_role_properties_xml(properties)
LOG.debug("Role properties data: %s", role_properties_xml)
self._wire_server_request(
"machine?comp=roleProperties", role_properties_xml,
parse_xml=False)
@property
def can_post_rdp_cert_thumbprint(self):
return True
def post_rdp_cert_thumbprint(self, thumbprint):
properties = {ROLE_PROPERTY_CERT_THUMB: thumbprint}
self._post_role_properties(properties)
def _get_hosting_environment(self):
config = self._get_role_instance_config()
return self._wire_server_request(config.HostingEnvironmentConfig.cdata)
def _get_shared_config(self):
config = self._get_role_instance_config()
return self._wire_server_request(config.SharedConfig.cdata)
def _get_extensions_config(self):
config = self._get_role_instance_config()
return self._wire_server_request(config.ExtensionsConfig.cdata)
def _get_full_config(self):
config = self._get_role_instance_config()
return self._wire_server_request(config.FullConfig.cdata)
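# The transport certificate is a temporary self-signed certificate whose
# public part is sent to the WireServer (x-ms-guest-agent-public-x509-cert
# header) so that role certificates can be returned as a base64 PKCS#7 blob,
# decoded in get_server_certs below; it is removed from the store when done.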
@contextlib.contextmanager
def _create_transport_cert(self, cert_mgr):
x509_thumbprint, x509_cert = cert_mgr.create_self_signed_cert(
"CN=Cloudbase-Init AzureService Transport", machine_keyset=True,
store_name=CONF.azure.transport_cert_store_name)
try:
yield (x509_thumbprint, x509_cert)
finally:
cert_mgr.delete_certificate_from_store(
x509_thumbprint, machine_keyset=True,
store_name=CONF.azure.transport_cert_store_name)
def _get_encoded_cert(self, cert_url, transport_cert):
cert_config = self._wire_server_request(
cert_url, headers={"x-ms-guest-agent-public-x509-cert":
transport_cert.replace("\r\n", "")})
cert_data = cert_config.CertificateFile.Data.cdata
cert_format = cert_config.CertificateFile.Format.cdata
return cert_data, cert_format
def get_server_certs(self):
def _get_store_location(store_location):
if store_location == u"System":
return constant.CERT_LOCATION_LOCAL_MACHINE
else:
return store_location
certs_info = []
config = self._get_role_instance_config()
if not hasattr(config, 'Certificates'):
return certs_info
cert_mgr = x509.CryptoAPICertManager()
with self._create_transport_cert(cert_mgr) as (
transport_cert_thumbprint, transport_cert):
cert_url = config.Certificates.cdata
cert_data, cert_format = self._get_encoded_cert(
cert_url, transport_cert)
pfx_data = cert_mgr.decode_pkcs7_base64_blob(
cert_data, transport_cert_thumbprint, machine_keyset=True,
store_name=CONF.azure.transport_cert_store_name)
host_env = self._get_hosting_environment()
host_env_config = host_env.HostingEnvironmentConfig
for cert in host_env_config.StoredCertificates.StoredCertificate:
certs_info.append({
"store_name": cert["storeName"],
"store_location": _get_store_location(
cert["configurationLevel"]),
"certificate_id": cert["certificateId"],
"name": cert["name"],
"pfx_data": pfx_data,
})
return certs_info
def get_instance_id(self):
return self._get_role_instance_id()
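# The config drive is located by scanning the logical drives for the
# OVF_ENV_DRIVE_TAG marker file; the result is cached for later lookups.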
def _get_config_set_drive_path(self):
if not self._config_set_drive_path:
base_paths = self._osutils.get_logical_drives()
for base_path in base_paths:
tag_path = os.path.join(base_path, OVF_ENV_DRIVE_TAG)
if os.path.exists(tag_path):
self._config_set_drive_path = base_path
if not self._config_set_drive_path:
raise exception.ItemNotFoundException(
"No drive containing file %s could be found" %
OVF_ENV_DRIVE_TAG)
return self._config_set_drive_path
def _get_ovf_env_path(self):
base_path = self._get_config_set_drive_path()
ovf_env_path = os.path.join(base_path, OVF_ENV_FILENAME)
if not os.path.exists(ovf_env_path):
raise exception.ItemNotFoundException(
"ovf-env path does not exist: %s" % ovf_env_path)
LOG.debug("ovs-env path: %s", ovf_env_path)
return ovf_env_path
def _get_ovf_env(self):
if not self._ovf_env:
ovf_env_path = self._get_ovf_env_path()
self._ovf_env = untangle.parse(ovf_env_path)
return self._ovf_env
def get_admin_username(self):
ovf_env = self._get_ovf_env()
prov_section = ovf_env.Environment.wa_ProvisioningSection
win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
return win_prov_conf_set.AdminUsername.cdata
def get_admin_password(self):
ovf_env = self._get_ovf_env()
prov_section = ovf_env.Environment.wa_ProvisioningSection
win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
return win_prov_conf_set.AdminPassword.cdata
def get_host_name(self):
ovf_env = self._get_ovf_env()
prov_section = ovf_env.Environment.wa_ProvisioningSection
win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
return win_prov_conf_set.ComputerName.cdata
def get_enable_automatic_updates(self):
ovf_env = self._get_ovf_env()
prov_section = ovf_env.Environment.wa_ProvisioningSection
win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
if hasattr(win_prov_conf_set, "EnableAutomaticUpdates"):
auto_updates = win_prov_conf_set.EnableAutomaticUpdates.cdata
return auto_updates.lower() == "true"
return False
def get_winrm_listeners_configuration(self):
listeners_config = []
ovf_env = self._get_ovf_env()
prov_section = ovf_env.Environment.wa_ProvisioningSection
win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
if hasattr(win_prov_conf_set, "WinRM"):
for listener in win_prov_conf_set.WinRM.Listeners.Listener:
protocol = listener.Protocol.cdata
config = {"protocol": protocol}
if hasattr(listener, "CertificateThumbprint"):
cert_thumbprint = listener.CertificateThumbprint.cdata
config["certificate_thumbprint"] = cert_thumbprint
listeners_config.append(config)
return listeners_config
def get_vm_agent_package_provisioning_data(self):
ovf_env = self._get_ovf_env()
plat_sett_section = ovf_env.Environment.wa_PlatformSettingsSection
plat_sett = plat_sett_section.PlatformSettings
prov_ga = False
ga_package_name = None
if hasattr(plat_sett, "ProvisionGuestAgent"):
prov_ga = plat_sett.ProvisionGuestAgent.cdata.lower() == "true"
if hasattr(plat_sett, "GuestAgentPackageName"):
ga_package_name = plat_sett.GuestAgentPackageName.cdata
return {"provision": prov_ga,
"package_name": ga_package_name}
def get_kms_host(self):
ovf_env = self._get_ovf_env()
plat_sett_section = ovf_env.Environment.wa_PlatformSettingsSection
host = None
if hasattr(plat_sett_section.PlatformSettings, "KmsServerHostname"):
host = plat_sett_section.PlatformSettings.KmsServerHostname.cdata
return host or DEFAULT_KMS_HOST
def get_use_avma_licensing(self):
ovf_env = self._get_ovf_env()
plat_sett_section = ovf_env.Environment.wa_PlatformSettingsSection
if hasattr(plat_sett_section.PlatformSettings, "UseAVMA"):
use_avma = plat_sett_section.PlatformSettings.UseAVMA.cdata
return use_avma.lower() == "true"
return False
def _check_ovf_env_custom_data(self):
# If the custom data file is missing, ensure the configuration matches
ovf_env = self._get_ovf_env()
prov_section = ovf_env.Environment.wa_ProvisioningSection
win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
if hasattr(win_prov_conf_set, "CustomData"):
return True
def get_user_data(self):
try:
return self.get_content(CUSTOM_DATA_FILENAME)
except base.NotExistingMetadataException:
if self._check_ovf_env_custom_data():
raise exception.ItemNotFoundException(
"Custom data configuration exists, but the custom data "
"file is not present")
raise
def get_decoded_user_data(self):
# Don't decode to retain compatibility
return self.get_user_data()
def get_content(self, name):
base_path = self._get_config_set_drive_path()
content_path = os.path.join(base_path, name)
if not os.path.exists(content_path):
raise base.NotExistingMetadataException()
with open(content_path, 'rb') as f:
return f.read()
def get_ephemeral_disk_data_loss_warning(self):
return self.get_content(DATALOSS_WARNING_PATH)
def load(self):
try:
wire_server_endpoint = self._get_wire_server_endpoint_address()
self._base_url = "http://%s" % wire_server_endpoint
except Exception:
LOG.debug("Azure WireServer endpoint not found")
return False
try:
super(AzureService, self).load()
self._check_version_header()
self._get_ovf_env()
return True
except Exception as ex:
LOG.exception(ex)
return False
repo_name: ader1990/cloudbase-init | path: cloudbaseinit/metadata/services/azureservice.py | language: Python | license: apache-2.0 | size: 18,702 | score: 0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import contextlib
import datetime
import functools
import json
import os
import re
import stubout
from nova import db
from nova import context
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.compute import instance_types
from nova.compute import power_state
from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
from nova.tests import fake_network
from nova.tests import fake_utils
LOG = logging.getLogger('nova.tests.test_xenapi')
FLAGS = flags.FLAGS
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_stream_disk(*args, **kwargs):
pass
def fake_is_vdi_pv(*args, **kwargs):
return should_return
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_stream_disk = vm_utils._stream_disk
orig_is_vdi_pv = vm_utils._is_vdi_pv
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
vm_utils._stream_disk = fake_stream_disk
vm_utils._is_vdi_pv = fake_is_vdi_pv
return function(self, *args, **kwargs)
finally:
vm_utils._is_vdi_pv = orig_is_vdi_pv
vm_utils._stream_disk = orig_stream_disk
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
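# Usage note: apply @stub_vm_utils_with_vdi_attached_here to a test method
# (see test_spawn_raw_glance below) to replace vdi_attached_here and the
# related vm_utils helpers for the duration of that test only.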
class XenAPIVolumeTestCase(test.TestCase):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.instance_values = {'id': 1,
'project_id': self.user_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@staticmethod
def _make_info():
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260,fake',
'auth_method': 'CHAP',
'auth_username': 'fake',
'auth_password': 'fake',
}
}
def test_parse_volume_info_raise_exception(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
self._make_info(),
'dev/sd'
)
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(self._make_info(),
instance.name, '/dev/sdc')
def check():
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
check()
def test_attach_volume_raise_exception(self):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
volume['id'],
'/dev/sdc')
def tearDown(self):
super(XenAPIVolumeTestCase, self).tearDown()
self.stubs.UnsetAll()
def configure_instance(*args):
pass
def _find_rescue_vbd_ref(*args):
pass
class XenAPIVMTestCase(test.TestCase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(vmops.VMOps, '_configure_instance',
configure_instance)
self.stubs.Set(vmops.VMOps, '_find_rescue_vbd_ref',
_find_rescue_vbd_ref)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
def test_get_diagnostics(self):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
# Stub out the firewall driver, as the previously set stubs alter
# XML-RPC result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.Error, self.conn.snapshot,
self.context, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stub out the firewall driver, as the previously set stubs alter
# XML-RPC result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
template_vm_ref = self.conn.snapshot(self.context, instance, name)
def ensure_vm_was_torn_down():
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, ['1'])
def ensure_vbd_was_torn_down():
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, ['1'])
def ensure_vdi_was_torn_down():
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
def check():
ensure_vm_was_torn_down()
ensure_vbd_was_torn_down()
ensure_vdi_was_torn_down()
check()
def create_vm_record(self, conn, os_type, instance_id=1):
instances = conn.list_instances()
self.assertEquals(instances, [str(instance_id)])
# Get Nova record for VM
vm_info = conn.get_info(instance_id)
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
self.assertEquals(self.vm_info['max_mem'], mem_kib)
self.assertEquals(self.vm_info['mem'], mem_kib)
self.assertEquals(self.vm['memory_static_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
key = 'vm-data/networking/DEADBEEF0000'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data,
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], '')
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEquals(self.vm['PV_kernel'], '')
self.assertNotEquals(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password)
return session.call_xenapi('VDI.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if not vdi_ref in start_list:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
check_injection=False,
create_record=True, empty_dns=False):
stubs.stubout_loopingcall_start(self.stubs)
if create_record:
instance_values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'root_gb': 20,
'instance_type_id': instance_type_id,
'os_type': os_type,
'hostname': hostname,
'architecture': architecture}
instance = db.instance_create(self.context, instance_values)
else:
instance = db.instance_get(self.context, instance_id)
network_info = [({'bridge': 'fa0', 'id': 0,
'injected': True,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::1/120',
},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
if empty_dns:
network_info[0][1]['dns'] = []
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
self.conn.spawn(self.context, instance, image_meta, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_empty_dns(self):
"""Test spawning with an empty dns list"""
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_fetch_image_glance_disk(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
It verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
# Change the default host_call_plugin to one that'll return
# a swap disk
orig_func = stubs.FakeSessionForVMTests.host_call_plugin
stubs.FakeSessionForVMTests.host_call_plugin = \
stubs.FakeSessionForVMTests.host_call_plugin_swap
# Stubbing out firewall driver as previous stub sets a particular
# stub for async plugin calls
stubs.stubout_firewall_driver(self.stubs, self.conn)
try:
# We'll steal the above glance linux test
self.test_spawn_vhd_glance_linux()
finally:
# Make sure to put this back
stubs.FakeSessionForVMTests.host_call_plugin = orig_func
# We should have 2 VBDs.
self.assertEqual(len(self.vm['VBDs']), 2)
# Now test that we have 1.
self.tearDown()
self.setUp()
self.test_spawn_vhd_glance_linux()
self.assertEqual(len(self.vm['VBDs']), 1)
def test_spawn_vhd_glance_windows(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertNotEqual(input, None)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 192.168.0.100',
'netmask 255.255.255.0',
'broadcast 192.168.0.255',
'gateway 192.168.0.1',
'dns-nameservers 192.168.0.1',
''])
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
])
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent' %
self._tmpdir))
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug(_('Removing simulated guest agent files in %s' %
self._tmpdir))
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn(1, 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_vlanmanager(self):
self.flags(image_service='nova.image.glance.GlanceImageService',
network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, 'create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
instance = self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid="00000000-0000-0000-0000-000000000000",
host=FLAGS.host,
vpn=None,
instance_type_id=1,
project_id=self.project_id)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
instance_id=2,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 1024))
def test_rescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
conn.rescue(self.context, instance, [], None)
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(Exception, conn.unrescue, instance)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, instance):
self.finish_revert_migration_called = True
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(instance)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
stubs.stubout_loopingcall_start(self.stubs)
instance_values = {
'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, instance_values)
network_info = [({'bridge': 'fa0', 'id': 0,
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::1/120',
},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
if spawn:
self.conn.spawn(self.context, instance, image_meta, network_info)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = vmops.SimpleDH()
self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEquals(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEquals(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
class XenAPIMigrateInstance(test.TestCase):
"""Unit test for verifying migration-related actions."""
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 5,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
def test_resize_xenserver_6(self):
instance = db.instance_create(self.context, self.instance_values)
called = {'resize': False}
def fake_vdi_resize(*args, **kwargs):
called['resize'] = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize", fake_vdi_resize)
stubs.stubout_session(self.stubs,
stubs.FakeSessionForMigrationTests,
product_version=(6, 0, 0))
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
conn._vmops._resize_instance(instance, '')
self.assertEqual(called['resize'], True)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', instance_type)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
conn = xenapi_conn.get_connection(False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance, '127.0.0.1', instance_type)
def test_revert_migrate(self):
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
conn.finish_revert_migration(instance)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_finish_migrate(self):
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
def test_finish_migrate_no_local_storage(self):
tiny_type_id = \
instance_types.get_instance_type_by_name('m1.tiny')['id']
self.instance_values.update({'instance_type_id': tiny_type_id,
'root_gb': 0})
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
# Resize instance would be determined by the compute call
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
"""Can convert from type id to type string."""
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def test_from_string(self):
"""Can convert from string to type id."""
self.assertEquals(
vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR),
vm_utils.ImageType.KERNEL)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""Unit tests for code that detects the ImageType."""
def setUp(self):
super(XenAPIDetermineDiskImageTestCase, self).setUp()
glance_stubs.stubout_glance_client(self.stubs)
class FakeInstance(object):
pass
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.VMHelper.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = {'id': 'a', 'disk_format': 'ami'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = {'id': 'a', 'disk_format': 'raw'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
"""Test that cmp_version compares a as less than b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
"""Test that cmp_version compares a as greater than b"""
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
"""Test that cmp_version compares a as equal to b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
"""Test that cmp_version compares non-lexically"""
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
"""Test that cmp_version compares by length as last resort"""
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
class FakeXenApi(object):
"""Fake XenApi for testing HostState."""
class FakeSR(object):
def get_record(self, ref):
return {'virtual_allocation': 10000,
'physical_utilisation': 20000}
SR = FakeSR()
class FakeSession(object):
"""Fake Session class for HostState testing."""
def async_call_plugin(self, *args):
return None
def wait_for_task(self, *args):
vm = {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40}
return json.dumps({'host_memory': vm})
def call_xenapi(self, method, *args):
f = FakeXenApi()
for m in method.split('.'):
f = getattr(f, m)
return f(*args)
class HostStateTestCase(test.TestCase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers."""
@classmethod
def _fake_safe_find_sr(cls, session):
"""None SR ref since we're ignoring it in FakeSR."""
return None
def test_host_state(self):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(vm_utils.VMHelper, 'safe_find_sr',
self._fake_safe_find_sr)
host_state = xenapi_conn.HostState(FakeSession())
stats = host_state._stats
self.assertEquals(stats['disk_total'], 10000)
self.assertEquals(stats['disk_used'], 20000)
self.assertEquals(stats['host_memory_total'], 10)
self.assertEquals(stats['host_memory_overhead'], 20)
self.assertEquals(stats['host_memory_free'], 30)
self.assertEquals(stats['host_memory_free_computed'], 40)
class XenAPIAutoDiskConfigTestCase(test.TestCase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
@classmethod
def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
bootable=True):
pass
self.stubs.Set(volume_utils.VolumeHelper,
"create_vbd",
fake_create_vbd)
def assertIsPartitionCalled(self, called):
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old, new):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
instance = db.instance_create(self.context, self.instance_values)
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = "blah"
first_vdi_ref = "blah"
vdis = ["blah"]
self.conn._vmops._attach_disks(
instance, disk_image_type, vm_ref, first_vdi_ref, vdis)
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
"""Should not partition unless fail safes pass"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
class XenAPIGenerateLocal(test.TestCase):
"""Test generating of local disks, like swap and ephemeral"""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
xenapi_generate_swap=True,
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
@classmethod
def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
bootable=True):
pass
self.stubs.Set(volume_utils.VolumeHelper,
"create_vbd",
fake_create_vbd)
def assertCalled(self, instance):
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = "blah"
first_vdi_ref = "blah"
vdis = ["blah"]
self.called = False
self.conn._vmops._attach_disks(instance, disk_image_type,
vm_ref, first_vdi_ref, vdis)
self.assertTrue(self.called)
def test_generate_swap(self):
"""Test swap disk generation."""
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 5})
@classmethod
def fake_generate_swap(cls, *args, **kwargs):
self.called = True
self.stubs.Set(vm_utils.VMHelper, 'generate_swap',
fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
"""Test ephemeral disk generation."""
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 4})
@classmethod
def fake_generate_ephemeral(cls, *args):
self.called = True
self.stubs.Set(vm_utils.VMHelper, 'generate_ephemeral',
fake_generate_ephemeral)
self.assertCalled(instance)
class XenAPIBWUsageTestCase(test.TestCase):
def setUp(self):
super(XenAPIBWUsageTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(vm_utils.VMHelper, "compile_metrics",
XenAPIBWUsageTestCase._fake_compile_metrics)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
@classmethod
def _fake_compile_metrics(cls, session, start_time, stop_time=None):
raise exception.CouldNotFetchMetrics()
def test_get_all_bw_usage_in_failure_case(self):
"""Test that get_all_bw_usage returns an empty list when metrics
compilation failed. c.f. bug #910045.
"""
result = self.conn.get_all_bw_usage(datetime.datetime.utcnow())
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.test_libvirt.IPTablesFirewallDriverTestCase share a lot of code.
# Consider abstracting common code in a base class for firewall driver testing.
class XenAPIDom0IptablesFirewallTestCase(test.TestCase):
_in_nat_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
]
_in_filter_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.stubs = stubout.StubOutForTesting()
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
self.conn = xenapi_conn.get_connection(False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_filter_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('-A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
def get_fixed_ips(*args, **kwargs):
ips = []
for _n, info in network_info:
ips.extend(info['ips'])
return [ip['ip'] for ip in ips]
def nw_info(*args, **kwargs):
return network_info
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
nw_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in get_fixed_ips():
regex = re.compile('-A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip)
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
network_info = fake_network.\
fake_get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
self.assertEquals(ipv4_network_rules,
ipv4_rules_per_addr * ipv4_addr_per_network * networks_count)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instances[instance_ref['id']] = instance_ref
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
#validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('-A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
|
KarimAllah/nova
|
nova/tests/test_xenapi.py
|
Python
|
apache-2.0
| 69,840
| 0.00126
|
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import json
import platform
from collections import defaultdict
from anaconda_go.lib import go
from anaconda_go.lib.plugin import typing
cachepath = {
'linux': os.path.join('~', '.local', 'share', 'anaconda', 'cache'),
'darwin': os.path.join('~', 'Library', 'Cache', 'anaconda'),
'windows': os.path.join(os.getenv('APPDATA') or '~', 'Anaconda', 'Cache')
}
cache_directory = os.path.expanduser(
cachepath.get(platform.system().lower())
)
PACKAGES_CACHE = defaultdict(lambda: [])
def append(package: typing.Dict) -> None:
"""Append the given package into the cache
"""
global PACKAGES_CACHE
if not package_in_cache(package):
PACKAGES_CACHE[go.GOROOT].append(package)
def package_in_cache(package: typing.Dict) -> bool:
"""Look for the given package in the cache and return true if is there
"""
for pkg in PACKAGES_CACHE[go.GOROOT]:
if pkg['ImportPath'] == package['ImportPath']:
return True
return False
def lookup(node_name: str='') -> typing.Dict:
"""Lookup the given node_name in the cache and return it back
"""
node = {}
if node_name == '':
node = PACKAGES_CACHE[go.GOROOT]
else:
for pkg in PACKAGES_CACHE[go.GOROOT]:
guru = pkg.get('Guru')
if guru is None:
continue
path = guru['package'].get('path')
if path is not None and path == node_name:
node = guru
break
for member in guru['package'].get('members', []):
if member.get('name') == node_name:
node = member
break
for method in member.get('methods', []):
if method['name'] == node_name:
node = method
break
return node
def persist_package_cache() -> None:
"""Write the contents of the package cache for this GOROOT into the disk
"""
gopath = go.GOPATH.replace(os.path.sep, '_')
cachefile = os.path.join(cache_directory, gopath, 'packages.cache')
if not os.path.exists(os.path.dirname(cachefile)):
os.makedirs(os.path.dirname(cachefile))
with open(cachefile, 'w') as fd:
json.dump(PACKAGES_CACHE[go.GOROOT], fd)
def load_package_cache() -> typing.List:
"""Load a previously stores package cache file
"""
global PACKAGES_CACHE
gopath = go.GOPATH.replace(os.path.sep, '_')
cachefile = os.path.join(cache_directory, gopath, 'packages.cache')
try:
with open(cachefile, 'r') as fd:
PACKAGES_CACHE[go.GOROOT] = json.load(fd)
except FileNotFoundError:
pass
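# --- Hedged usage sketch, not part of the original module ---
# Rough round-trip of the cache helpers above, assuming go.GOROOT and
# go.GOPATH are already configured by the plugin; the package dict is an
# invented minimal entry, while the real cache stores richer
# 'go list'-style data keyed by ImportPath.
def _cache_round_trip_example():
    pkg = {'ImportPath': 'github.com/example/pkg'}   # hypothetical package
    append(pkg)                     # stored under the current GOROOT
    persist_package_cache()         # written to <cache_dir>/<GOPATH>/packages.cache
    PACKAGES_CACHE[go.GOROOT] = []  # drop the in-memory copy
    load_package_cache()            # reload it from disk
    return package_in_cache(pkg)    # True again after the reload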
|
DamnWidget/anaconda_go
|
lib/cache.py
|
Python
|
gpl-3.0
| 2,829
| 0.000707
|
# ubuntuone.syncdaemon.hash_queue - hash queues
#
# Authors: Facundo Batista <facundo@canonical.com>
# Guillermo Gonzalez <guillermo.gonzalez@canonical.com>
# Alejandro J. Cura <alecu@canonical.com>
#
# Copyright 2009-2011 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module that implements the Hash Queue machinery."""
from __future__ import with_statement
import logging
import threading
import Queue
import time
from twisted.internet import reactor
from twisted.python.util import OrderedDict as TxOrderedDict
from ubuntuone.storageprotocol.content_hash import \
content_hash_factory, crc32
from ubuntuone.platform import (
open_file,
stat_path,
)
from ubuntuone.platform.constants import HASHQUEUE_DELAY
NO_TIMESTAMP = None
class FixedOrderedDict(TxOrderedDict):
"""A t.p.u.OrderedDict that behaves like Python 2.7's OrderedDict."""
def popitem(self, last=False):
"""Take out the first or last item, and return it."""
index = -1 if last else 0
key = self._order[index]
value = self[key]
del self[key]
return (key, value)
def clear(self):
"""Remove every item from the dict."""
self._order = []
TxOrderedDict.clear(self)
try:
# try to use the OrderedDict from stdlib >= 2.7
from collections import OrderedDict as StdlibOrderedDict
OrderedDict = StdlibOrderedDict
except ImportError:
# if not available, use the patched one based on twisted
OrderedDict = FixedOrderedDict
class StopHashing(Exception):
"""The current hash was cancelled."""
class _Hasher(threading.Thread):
"""Class that lives in another thread, hashing all night long."""
def __init__(self, queue, end_mark, event_queue):
self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ.hasher')
self.end_mark = end_mark
self.queue = queue
self.eq = event_queue
        # mutex guarding access to the _should_cancel and hashing attributes
self.mutex = threading.Lock()
self._should_cancel = None
self._stopped = True # start stopped
self.chunk_size = 2**16
self.hashing = None
threading.Thread.__init__(self)
def run(self):
"""Run the thread."""
self._stopped = False
while True:
if self._stopped:
break
info, timestamp = self.queue.get()
if info is self.end_mark:
self._stopped = True
self.queue.task_done()
break
path, mdid = info
with self.mutex:
self.hashing = path
m = "Hasher: got file to hash: path %r mdid %s"
self.logger.debug(m, path, mdid)
now = time.time()
delta = timestamp - now
if delta > 0:
self.logger.trace("Waiting %f before starting hash", delta)
time.sleep(delta)
try:
result = self._hash(path)
except (IOError, OSError), e:
m = "Hasher: hash error %s (path %r mdid %s)"
self.logger.debug(m, e, path, mdid)
reactor.callLater(.1, reactor.callFromThread, self.eq.push,
"HQ_HASH_ERROR", mdid=mdid)
except StopHashing, e:
self.logger.debug(str(e))
else:
hashdata, crc, size, stat = result
self.logger.debug("Hasher: path hash pushed: path=%r hash=%s"
" crc=%s size=%d st_ino=%d st_size=%d"
" st_mtime=%r", path, hashdata,crc, size,
stat.st_ino, stat.st_size, stat.st_mtime)
reactor.callFromThread(self.eq.push, "HQ_HASH_NEW", path=path,
hash=hashdata, crc32=crc,
size=size, stat=stat)
finally:
with self.mutex:
self.hashing = None
self.queue.task_done()
def stop(self):
"""Stop the hasher.
Will be effective in the next loop if a hash is in progress.
"""
        # clear the queue and push an end_mark, just to unblock if we are
        # waiting for a new item
        self.queue.clear()
        # set the end_mark in case we are waiting for a path
item = (self.end_mark, NO_TIMESTAMP)
self.queue.put(item)
self._stopped = True
def _hash(self, path):
"""Actually hashes a file."""
hasher = content_hash_factory()
crc = 0
size = 0
try:
initial_stat = stat_path(path)
with open_file(path, 'rb') as fh:
while True:
# stop hashing if path_to_cancel == path or _stopped is True
with self.mutex:
path_to_cancel = self._should_cancel
if path_to_cancel == path or self._stopped:
raise StopHashing('hashing of %r was cancelled' % path)
cont = fh.read(self.chunk_size)
if not cont:
break
hasher.update(cont)
crc = crc32(cont, crc)
size += len(cont)
finally:
with self.mutex:
self._should_cancel = None
return hasher.content_hash(), crc, size, initial_stat
def busy(self):
"""Return whether we are busy."""
with self.mutex:
return self.hashing
def cancel_if_running(self, path):
"""Request a cancel/stop of the current hash, if it's == path."""
with self.mutex:
if self.hashing == path:
self._should_cancel = path
class HashQueue(object):
"""Interface between the real Hasher and the rest of the world."""
def __init__(self, event_queue):
self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ')
self._stopped = False
self._queue = UniqueQueue()
self._end_mark = object()
self.hasher = _Hasher(self._queue, self._end_mark, event_queue)
self.hasher.setDaemon(True)
self.hasher.start()
self.logger.info("HashQueue: _hasher started")
def _timestamp(self):
"""A timestamp with a small delay into the future."""
return time.time() + HASHQUEUE_DELAY
def insert(self, path, mdid):
"""Insert the path of a file to be hashed."""
if self._stopped:
self.logger.warning("HashQueue: already stopped when received "
"path %r mdid %s", path, mdid)
return
self.logger.debug("HashQueue: inserting path %r mdid %s", path, mdid)
self.hasher.cancel_if_running(path)
item = ((path, mdid), self._timestamp())
self._queue.put(item)
def shutdown(self):
"""Shutdown all resources and clear the queue"""
# clear the queue
self._queue.clear()
# stop the hasher
self.hasher.stop()
self._stopped = True
self.logger.info("HashQueue: _hasher stopped")
def empty(self):
"""Return whether we are empty or not"""
return self._queue.empty() and not self.hasher.busy()
def __len__(self):
"""Return the length of the queue (not reliable!)"""
return self._queue.qsize()
def is_hashing(self, path, mdid):
"""Return if the path is being hashed or in the queue."""
if self.hasher.hashing == path:
return True
if (path, mdid) in self._queue:
return True
return False
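# --- Hedged usage sketch, not part of the original module ---
# Rough lifecycle of a HashQueue, assuming event_queue is any object with
# the push(event_name, **kwargs) method used by _Hasher above. Results
# arrive asynchronously: the hasher thread pushes HQ_HASH_NEW or
# HQ_HASH_ERROR through the reactor once the delayed hash completes.
def _hash_queue_example(event_queue):
    hq = HashQueue(event_queue)
    hq.insert('/tmp/some_file', 'mdid-123')  # queued; hashed after HASHQUEUE_DELAY
    pending = hq.is_hashing('/tmp/some_file', 'mdid-123')
    hq.shutdown()                            # clears the queue and stops the thread
    return pending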
class UniqueQueue(Queue.Queue):
"""Variant of Queue that only inserts unique items in the Queue."""
def __init__(self, *args, **kwargs):
"""create the instance"""
Queue.Queue.__init__(self, *args, **kwargs)
self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ.Queue')
def _init(self, maxsize):
"""Override the underlaying data initialization."""
self.queue = OrderedDict()
def _qsize(self, len=len):
"""The size of the queue."""
return len(self.queue)
def _put(self, item):
"""Custom _put that removes previous instances of this item."""
key, value = item
if key in self.queue:
# we must delete it first, so the new one is added to the end
del(self.queue[key])
self.logger.debug('Previous item removed from the queue: %r', key)
self.queue[key] = value
def _get(self):
"""Custom _get that returns the first (key, value) pair."""
return self.queue.popitem(last=False)
def clear(self):
"""clear the internal queue and notify all blocked threads"""
self.queue.clear()
with self.all_tasks_done:
self.unfinished_tasks = 0
self.all_tasks_done.notifyAll()
def __contains__(self, key):
"""Tell if a key is in the queue."""
return key in self.queue
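# --- Hedged usage sketch, not part of the original module ---
# Minimal illustration of the UniqueQueue semantics implemented above:
# items are (key, value) pairs, and re-putting an existing key drops the
# old entry and appends the new one, so consumers only ever see the latest
# value per key, in insertion order.
def _unique_queue_example():
    q = UniqueQueue()
    q.put((('path/a', 'mdid-1'), 1.0))
    q.put((('path/b', 'mdid-2'), 2.0))
    q.put((('path/a', 'mdid-1'), 3.0))  # replaces the first entry, moves it last
    assert ('path/a', 'mdid-1') in q
    key, value = q.get()                # oldest surviving key comes out first
    assert (key, value) == (('path/b', 'mdid-2'), 2.0)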
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/ubuntuone-client/ubuntuone/syncdaemon/hash_queue.py
|
Python
|
gpl-3.0
| 9,720
| 0.000412
|
''' Provide functions to embed Bokeh models (e.g., plots, widget, layouts)
in various different ways.
There are a number of different combinations of options when embedding
Bokeh plots. The data for the plot can be contained in the document,
or on a Bokeh server, or in a sidecar JavaScript file. Likewise, BokehJS
may be inlined in the document, or loaded from CDN or a Bokeh server.
The functions in ``bokeh.embed`` provide functionality to embed in all
these different cases.
'''
from __future__ import absolute_import
import re
import uuid
from warnings import warn
from .templates import (
AUTOLOAD_JS, AUTOLOAD_TAG, FILE,
NOTEBOOK_DIV, PLOT_DIV, DOC_JS, SCRIPT_TAG
)
from .util.string import encode_utf8
from .plot_object import PlotObject, _ModelInDocument
from ._json_encoder import serialize_json
from .resources import DEFAULT_SERVER_HTTP_URL
from .client import DEFAULT_SESSION_ID
from .document import Document
from collections import Sequence
from six import string_types
def _wrap_in_function(code):
# indent and wrap Bokeh function def around
code = "\n".join([" " + line for line in code.split("\n")])
return 'Bokeh.$(function() {\n%s\n});' % code
def components(plot_objects, resources=None, wrap_script=True, wrap_plot_info=True):
'''
Return HTML components to embed a Bokeh plot. The data for the plot is
stored directly in the returned HTML.
An example can be found in examples/embed/embed_multiple.py
.. note::
The returned components assume that BokehJS resources are
**already loaded**.
Args:
plot_objects (PlotObject|list|dict|tuple) :
A single PlotObject, a list/tuple of PlotObjects, or a dictionary of keys and PlotObjects.
resources :
Deprecated argument
wrap_script (boolean, optional) :
If True, the returned javascript is wrapped in a script tag.
(default: True)
wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.
Otherwise, return dicts that can be used to build your own divs.
(default: True)
If False, the returned dictionary contains the following information:
.. code-block:: python
{
'modelid': 'The model ID, used with Document.get_model_by_id',
'elementid': 'The css identifier the BokehJS will look for to target the plot',
'docid': 'Used by Bokeh to find the doc embedded in the returned script',
}
Returns:
UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*
Examples:
With default wrapping parameter values:
.. code-block:: python
components(plot)
# => (script, plot_div)
components((plot1, plot2))
# => (script, (plot1_div, plot2_div))
components({"Plot 1": plot1, "Plot 2": plot2})
# => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div})
Examples:
With wrapping parameters set to ``False``:
.. code-block:: python
components(plot, wrap_script=False, wrap_plot_info=False)
# => (javascript, plot_dict)
components((plot1, plot2), wrap_script=False, wrap_plot_info=False)
# => (javascript, (plot1_dict, plot2_dict))
components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False)
# => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict})
'''
if resources is not None:
warn('Because the ``resources`` argument is no longer needed, '
'it is deprecated and no longer has any effect',
DeprecationWarning, stacklevel=2)
# 1) Convert single items and dicts into list
was_single_object = isinstance(plot_objects, PlotObject) or isinstance(plot_objects, Document)
# converts single to list
plot_objects = _check_plot_objects(plot_objects, allow_dict=True)
# now convert dict to list, saving keys in the same order
plot_object_keys = None
if isinstance(plot_objects, dict):
plot_object_keys = plot_objects.keys()
values = []
# don't just use .values() to ensure we are in the same order as key list
for k in plot_object_keys:
values.append(plot_objects[k])
plot_objects = values
# 2) Do our rendering
with _ModelInDocument(plot_objects):
(docs_json, render_items) = _standalone_docs_json_and_render_items(plot_objects)
custom_models = _extract_custom_models(plot_objects)
script = _script_for_render_items(docs_json, render_items, custom_models=custom_models,
websocket_url=None, wrap_script=wrap_script)
script = encode_utf8(script)
if wrap_plot_info:
results = list(_div_for_render_item(item) for item in render_items)
else:
results = render_items
# 3) convert back to the input shape
if was_single_object:
return script, results[0]
elif plot_object_keys is not None:
result = {}
for (key, value) in zip(plot_object_keys, results):
result[key] = value
return script, result
else:
return script, tuple(results)
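# --- Hedged usage sketch, not part of bokeh.embed ---
# Minimal use of the components() contract documented above; make_plot is
# a hypothetical factory for any PlotObject, and the surrounding page is
# assumed to load BokehJS resources itself, as the docstring requires.
def _components_example(make_plot):
    plot = make_plot()
    script, div = components(plot)
    # div marks where the plot renders; script carries the serialized
    # document and must be included on the same page.
    return u"<body>\n%s\n%s\n</body>" % (div, script)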
def _escape_code(code):
""" Escape JS/CS source code, so that it can be embbeded in a JS string.
This is based on https://github.com/joliss/js-string-escape.
"""
def escape(match):
ch = match.group(0)
if ch == '"' or ch == "'" or ch == '\\':
return '\\' + ch
elif ch == '\n':
return '\\n'
elif ch == '\r':
return '\\r'
elif ch == '\u2028':
return '\\u2028'
elif ch == '\u2029':
return '\\u2029'
return re.sub(u"""['"\\\n\r\u2028\u2029]""", escape, code)
def _extract_custom_models(plot_objects):
custom_models = {}
def extract_from_model(model):
for r in model.references():
impl = getattr(r.__class__, "__implementation__", None)
if impl is not None:
name = r.__class__.__name__
impl = "['%s', {}]" % _escape_code(impl)
custom_models[name] = impl
for o in plot_objects:
if isinstance(o, Document):
for r in o.roots:
extract_from_model(r)
else:
extract_from_model(o)
return custom_models
def notebook_div(plot_object):
''' Return HTML for a div that will display a Bokeh plot in an
IPython Notebook
The data for the plot is stored directly in the returned HTML.
Args:
plot_object (PlotObject) : Bokeh object to render
Returns:
UTF-8 encoded HTML text for a ``<div>``
.. note::
Assumes :func:`~bokeh.util.notebook.load_notebook` or the equivalent
has already been executed.
'''
plot_object = _check_one_plot_object(plot_object)
with _ModelInDocument(plot_object):
(docs_json, render_items) = _standalone_docs_json_and_render_items([plot_object])
custom_models = _extract_custom_models([plot_object])
script = _script_for_render_items(docs_json, render_items,
custom_models=custom_models,
websocket_url=None)
item = render_items[0]
div = _div_for_render_item(item)
html = NOTEBOOK_DIV.render(
plot_script = script,
plot_div = div,
)
return encode_utf8(html)
def _use_widgets(plot_objects):
from .models.widgets import Widget
for o in plot_objects:
if isinstance(o, Document):
if _use_widgets(o.roots):
return True
else:
if any(isinstance(model, Widget) for model in o.references()):
return True
return False
def file_html(plot_objects,
resources,
title,
js_resources=None,
css_resources=None,
template=FILE,
template_variables={}):
'''Return an HTML document that embeds Bokeh PlotObject or Document objects.
The data for the plot is stored directly in the returned HTML.
This is an alias for standalone_html_page_for_models() which
supports customizing the JS/CSS resources independently and
customizing the jinja2 template.
Args:
plot_objects (PlotObject or Document or list) : Bokeh object or objects to render
typically a PlotObject or Document
resources (Resources) : a resource configuration for BokehJS assets
title (str) : a title for the HTML document ``<title>`` tags
template (Template, optional) : HTML document template (default: FILE)
A Jinja2 Template, see bokeh.templates.FILE for the required
template parameters
template_variables (dict, optional) : variables to be used in the Jinja2
template. If used, the following variable names will be overwritten:
title, js_resources, css_resources, plot_script, plot_div
Returns:
UTF-8 encoded HTML
'''
plot_objects = _check_plot_objects(plot_objects)
with _ModelInDocument(plot_objects):
(docs_json, render_items) = _standalone_docs_json_and_render_items(plot_objects)
custom_models = _extract_custom_models(plot_objects)
return _html_page_for_render_items(resources, docs_json, render_items, title,
custom_models=custom_models, websocket_url=None,
js_resources=js_resources, css_resources=css_resources,
template=template, template_variables=template_variables,
use_widgets=_use_widgets(plot_objects))
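# --- Hedged usage sketch, not part of bokeh.embed ---
# Minimal use of file_html() as documented above. make_plot is a
# hypothetical PlotObject factory, and CDN is assumed to be the stock
# resource configuration importable from .resources in this Bokeh version.
def _file_html_example(make_plot):
    from .resources import CDN
    return file_html(make_plot(), CDN, title="Example plot")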
# TODO rename this "standalone"?
def autoload_static(plot_object, resources, script_path):
''' Return JavaScript code and a script tag that can be used to embed
Bokeh Plots.
The data for the plot is stored directly in the returned JavaScript code.
Args:
plot_object (PlotObject or Document) :
resources (Resources) :
script_path (str) :
Returns:
(js, tag) :
JavaScript code to be saved at ``script_path`` and a ``<script>``
tag to load it
Raises:
ValueError
'''
if resources.mode == 'inline':
raise ValueError("autoload_static() requires non-inline resources")
# TODO why is this?
if resources.dev:
raise ValueError("autoload_static() only works with non-dev resources")
plot_object = _check_one_plot_object(plot_object)
with _ModelInDocument(plot_object):
(docs_json, render_items) = _standalone_docs_json_and_render_items([plot_object])
item = render_items[0]
model_id = ""
if 'modelid' in item:
model_id = item['modelid']
doc_id = ""
if 'docid' in item:
doc_id = item['docid']
js = AUTOLOAD_JS.render(
docs_json = serialize_json(docs_json),
# TODO we should load all the JS files, but the code
# in AUTOLOAD_JS isn't smart enough to deal with it.
js_url = resources.js_files[0],
css_files = resources.css_files,
elementid = item['elementid'],
websocket_url = None
)
tag = AUTOLOAD_TAG.render(
src_path = script_path,
elementid = item['elementid'],
modelid = model_id,
docid = doc_id,
loglevel = resources.log_level
)
return encode_utf8(js), encode_utf8(tag)
def autoload_server(plot_object, app_path="/", session_id=DEFAULT_SESSION_ID, url="default", loglevel="info"):
''' Return a script tag that can be used to embed Bokeh Plots from
a Bokeh Server.
The data for the plot is stored on the Bokeh Server.
Args:
plot_object (PlotObject) : the object to render from the session, or None for entire document
app_path (str, optional) : the server path to the app we want to load
session_id (str, optional) : server session ID
url (str, optional) : server root URL (where static resources live, not where a specific app lives)
loglevel (str, optional) : "trace", "debug", "info", "warn", "error", "fatal"
Returns:
tag :
a ``<script>`` tag that will execute an autoload script
loaded from the Bokeh Server
'''
if url == "default":
url = DEFAULT_SERVER_HTTP_URL
elementid = str(uuid.uuid4())
# empty model_id means render the entire doc from session_id
model_id = ""
if plot_object is not None:
model_id = plot_object._id
if not url.endswith("/"):
url = url + "/"
if not app_path.endswith("/"):
app_path = app_path + "/"
if app_path.startswith("/"):
app_path = app_path[1:]
src_path = url + app_path + "autoload.js" + "?bokeh-autoload-element=" + elementid
tag = AUTOLOAD_TAG.render(
src_path = src_path,
elementid = elementid,
modelid = model_id,
sessionid = session_id,
loglevel = loglevel
)
return encode_utf8(tag)
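# --- Hedged usage sketch, not part of bokeh.embed ---
# Illustrates how autoload_server() assembles the script source from its
# arguments, following the string handling above; the URL and session id
# are invented values, not output captured from a real server.
def _autoload_server_example():
    tag = autoload_server(None, app_path="/myapp",
                          session_id="example-session",
                          url="http://localhost:5006")
    # The rendered tag points at
    # http://localhost:5006/myapp/autoload.js?bokeh-autoload-element=<elementid>
    # with an empty modelid, meaning "render the whole session document".
    return tag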
def _script_for_render_items(docs_json, render_items, websocket_url,
custom_models, wrap_script=True):
# this avoids emitting the "register custom models" code at all
# just to register an empty set
if (custom_models is not None) and len(custom_models) == 0:
custom_models = None
plot_js = _wrap_in_function(
DOC_JS.render(
custom_models=custom_models,
websocket_url=websocket_url,
docs_json=serialize_json(docs_json),
render_items=serialize_json(render_items)
)
)
if wrap_script:
return SCRIPT_TAG.render(js_code=plot_js)
else:
return plot_js
def _html_page_for_render_items(resources, docs_json, render_items, title, websocket_url,
custom_models, js_resources=None, css_resources=None,
template=FILE, template_variables={}, use_widgets=True):
if resources:
if js_resources:
warn('Both resources and js_resources provided. resources will override js_resources.')
if css_resources:
warn('Both resources and css_resources provided. resources will override css_resources.')
js_resources = resources
css_resources = resources
bokeh_js = ''
if js_resources:
if not css_resources:
warn('No Bokeh CSS Resources provided to template. If required you will need to provide them manually.')
js_resources = js_resources.use_widgets(use_widgets)
bokeh_js = js_resources.render_js()
bokeh_css = ''
if css_resources:
if not js_resources:
warn('No Bokeh JS Resources provided to template. If required you will need to provide them manually.')
css_resources = css_resources.use_widgets(use_widgets)
bokeh_css = css_resources.render_css()
script = _script_for_render_items(docs_json, render_items, websocket_url, custom_models)
template_variables_full = template_variables.copy()
template_variables_full.update(dict(
title = title,
bokeh_js = bokeh_js,
bokeh_css = bokeh_css,
plot_script = script,
plot_div = "\n".join(_div_for_render_item(item) for item in render_items)
))
html = template.render(template_variables_full)
return encode_utf8(html)
def _check_plot_objects(plot_objects, allow_dict=False):
input_type_valid = False
# Check for single item
if isinstance(plot_objects, (PlotObject, Document)):
plot_objects = [plot_objects]
# Check for sequence
if isinstance(plot_objects, Sequence) and all(isinstance(x, (PlotObject, Document)) for x in plot_objects):
input_type_valid = True
if allow_dict:
if isinstance(plot_objects, dict) and \
all(isinstance(x, string_types) for x in plot_objects.keys()) and \
all(isinstance(x, (PlotObject, Document)) for x in plot_objects.values()):
input_type_valid = True
if not input_type_valid:
if allow_dict:
raise ValueError(
'Input must be a PlotObject, a Document, a Sequence of PlotObjects and Document, or a dictionary from string to PlotObject and Document'
)
else:
raise ValueError('Input must be a PlotObject, a Document, or a Sequence of PlotObjects and Document')
return plot_objects
def _check_one_plot_object(plot_object):
plot_objects = _check_plot_objects(plot_object)
if len(plot_objects) != 1:
raise ValueError("Input must be exactly one PlotObject or Document")
return plot_objects[0]
def _div_for_render_item(item):
return PLOT_DIV.render(elementid=item['elementid'])
def _standalone_docs_json_and_render_items(plot_objects):
plot_objects = _check_plot_objects(plot_objects)
render_items = []
docs_by_id = {}
for p in plot_objects:
modelid = None
if isinstance(p, Document):
doc = p
else:
if p.document is None:
raise ValueError("To render a PlotObject as HTML it must be part of a Document")
doc = p.document
modelid = p._id
docid = None
for key in docs_by_id:
if docs_by_id[key] == doc:
docid = key
if docid is None:
docid = str(uuid.uuid4())
docs_by_id[docid] = doc
elementid = str(uuid.uuid4())
render_items.append({
'docid' : docid,
'elementid' : elementid,
# if modelid is None, that means the entire document
'modelid' : modelid
})
docs_json = {}
for k, v in docs_by_id.items():
docs_json[k] = v.to_json()
return (docs_json, render_items)
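# --- Hedged illustration, not part of bokeh.embed ---
# Shape of the values returned above, with invented ids: docs_json maps a
# generated document id to its serialized form, and each render item ties
# one target <div> to a (docid, modelid) pair, modelid being None when the
# whole document should be rendered.
_EXAMPLE_RENDER_ITEM = {
    'docid': 'f3b1c2d4-0000-0000-0000-000000000000',      # key into docs_json
    'elementid': 'a7e9d8c1-0000-0000-0000-000000000000',  # id of the target <div>
    'modelid': None,
}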
# TODO this is a theory about what file_html() "should" be,
# with a more explicit name similar to the server names below,
# and without the jinja2 entanglement. Thus this encapsulates that
# we use jinja2 and encapsulates the exact template variables we require.
# Anyway, we should deprecate file_html or else drop this version,
# most likely.
def standalone_html_page_for_models(plot_objects, resources, title):
''' Return an HTML document that renders zero or more Bokeh documents or models.
The document for each model will be embedded directly in the HTML, so the
resulting HTML file is standalone (does not require a server). Depending
on the provided resources, the HTML file may be completely self-contained
or may have to load JS and CSS from different files.
Args:
plot_objects (PlotObject or Document) : Bokeh object to render
typically a PlotObject or a Document
resources (Resources) : a resource configuration for BokehJS assets
title (str) : a title for the HTML document ``<title>`` tags
Returns:
UTF-8 encoded HTML
'''
return file_html(plot_objects, resources, title)
def server_html_page_for_models(session_id, model_ids, resources, title, websocket_url):
render_items = []
for modelid in model_ids:
if modelid is None:
raise ValueError("None found in list of model_ids")
elementid = str(uuid.uuid4())
render_items.append({
'sessionid' : session_id,
'elementid' : elementid,
'modelid' : modelid
})
return _html_page_for_render_items(resources, {}, render_items, title,
websocket_url=websocket_url, custom_models=None)
def server_html_page_for_session(session_id, resources, title, websocket_url):
elementid = str(uuid.uuid4())
render_items = [{
'sessionid' : session_id,
'elementid' : elementid
# no 'modelid' implies the entire session document
}]
return _html_page_for_render_items(resources, {}, render_items, title,
websocket_url=websocket_url, custom_models=None)
|
gpfreitas/bokeh
|
bokeh/embed.py
|
Python
|
bsd-3-clause
| 20,330
| 0.005804
|
# -*- coding: utf-8 -*-
from test_settings import Settings
class TestCase(Settings):
def test_sidebar(self):
        # Applies the settings.
self.do_settings()
        # Clicks on 'Genel' (General).
self.driver.find_element_by_css_selector(
'li.ng-binding:nth-child(3) > a:nth-child(1) > span:nth-child(2)').click()
        # Clicks on 'Ogrenci Iletisim Bilgileri' (Student Contact Information).
self.driver.find_element_by_css_selector('ul.in:nth-child(2) > li:nth-child(2) > a:nth-child(1)').click()
self.do_login()
        # Clicks on 'Genel' (General).
self.driver.find_element_by_css_selector(
'li.ng-binding:nth-child(3) > a:nth-child(1) > span:nth-child(2)').click()
        # Clicks on 'Ogrenci Iletisim Bilgileri' (Student Contact Information).
self.driver.find_element_by_css_selector('ul.in:nth-child(2) > li:nth-child(2) > a:nth-child(1)').click()
        # Sends a value to 'Ikamet Il' (province of residence).
self.driver.find_element_by_css_selector('#ikamet_il').send_keys('Bilecik')
        # Sends a value to 'Ikamet Ilce' (district of residence).
self.driver.find_element_by_css_selector('#ikamet_ilce').send_keys('Merkez')
        # Sends a value to the residence address field ('Ikametgah Adresi').
self.driver.find_element_by_css_selector('#ikamet_adresi').send_keys('balim sokak')
        # Sends a value to 'Posta Kodu' (postal code).
self.driver.find_element_by_css_selector('#posta_kodu').send_keys('11000')
        # Sends a value to 'Telefon Numarasi' (phone number).
self.driver.find_element_by_css_selector('#tel_no').send_keys('0534626286816')
        # Clicks 'Kaydet' (Save).
self.driver.find_element_by_css_selector('button.btn-danger:nth-child(1)').click()
|
zetaops/ulakbus
|
selenium_tests/test_ogrenci_iletisim_bilgileri.py
|
Python
|
gpl-3.0
| 1,614
| 0.006196
|
from __future__ import unicode_literals
import json
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from temba.contacts.models import ContactField
from temba.flows.models import RuleSet
from temba.orgs.models import Language
from temba.tests import FlowFileTest
from .models import Value
class ResultTest(FlowFileTest):
def assertResult(self, result, index, category, count):
self.assertEquals(count, result['categories'][index]['count'])
self.assertEquals(category, result['categories'][index]['label'])
def test_field_results(self):
(c1, c2, c3, c4) = (self.create_contact("Contact1", '0788111111'),
self.create_contact("Contact2", '0788222222'),
self.create_contact("Contact3", '0788333333'),
self.create_contact("Contact4", '0788444444'))
# create a gender field that uses strings
gender = ContactField.get_or_create(self.org, self.admin, 'gender', label="Gender", value_type=Value.TYPE_TEXT)
c1.set_field(self.user, 'gender', "Male")
c2.set_field(self.user, 'gender', "Female")
c3.set_field(self.user, 'gender', "Female")
result = Value.get_value_summary(contact_field=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset']) # this is two as we have the default contact created by our unit tests
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Female", 2)
self.assertResult(result, 1, "Male", 1)
        # create a born field that uses decimals
born = ContactField.get_or_create(self.org, self.admin, 'born', label="Born", value_type=Value.TYPE_DECIMAL)
c1.set_field(self.user, 'born', 1977)
c2.set_field(self.user, 'born', 1990)
c3.set_field(self.user, 'born', 1977)
result = Value.get_value_summary(contact_field=born)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset'])
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "1977", 2)
self.assertResult(result, 1, "1990", 1)
# ok, state field!
state = ContactField.get_or_create(self.org, self.admin, 'state', label="State", value_type=Value.TYPE_STATE)
c1.set_field(self.user, 'state', "Kigali City")
c2.set_field(self.user, 'state', "Kigali City")
result = Value.get_value_summary(contact_field=state)[0]
self.assertEquals(1, len(result['categories']))
self.assertEquals(2, result['set'])
self.assertEquals(3, result['unset'])
self.assertResult(result, 0, "1708283", 2)
reg_date = ContactField.get_or_create(self.org, self.admin, 'reg_date', label="Registration Date", value_type=Value.TYPE_DATETIME)
now = timezone.now()
c1.set_field(self.user, 'reg_date', now.replace(hour=9))
c2.set_field(self.user, 'reg_date', now.replace(hour=4))
c3.set_field(self.user, 'reg_date', now - timedelta(days=1))
result = Value.get_value_summary(contact_field=reg_date)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset'])
self.assertResult(result, 0, now.replace(hour=0, minute=0, second=0, microsecond=0), 2)
self.assertResult(result, 1, (now - timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0), 1)
# make sure categories returned are sorted by count, not name
c2.set_field(self.user, 'gender', "Male")
result = Value.get_value_summary(contact_field=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset']) # this is two as we have the default contact created by our unit tests
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Male", 2)
self.assertResult(result, 1, "Female", 1)
# check the modified date is tracked for fields
original_value = Value.objects.get(contact=c1, contact_field=gender)
c1.set_field(self.user, 'gender', 'unknown')
new_value = Value.objects.get(contact=c1, contact_field=gender)
self.assertTrue(new_value.modified_on > original_value.modified_on)
self.assertNotEqual(new_value.string_value, original_value.string_value)
def run_color_gender_flow(self, contact, color, gender, age):
self.assertEqual(self.send_message(self.flow, color, contact=contact, restart_participants=True), "What is your gender?")
self.assertEqual(self.send_message(self.flow, gender, contact=contact), "What is your age?")
self.assertEqual(self.send_message(self.flow, age, contact=contact), "Thanks.")
def setup_color_gender_flow(self):
self.flow = self.get_flow('color_gender_age')
(self.c1, self.c2, self.c3, self.c4) = (self.create_contact("Contact1", '0788111111'),
self.create_contact("Contact2", '0788222222'),
self.create_contact("Contact3", '0788333333'),
self.create_contact("Contact4", '0788444444'))
def test_category_results(self):
self.setup_color_gender_flow()
# create a state field:
# assign c1 and c2 to Kigali
state = ContactField.get_or_create(self.org, self.admin, 'state', label="State", value_type=Value.TYPE_STATE)
district = ContactField.get_or_create(self.org, self.admin, 'district', label="District", value_type=Value.TYPE_DISTRICT)
self.c1.set_field(self.user, 'state', "Kigali City")
self.c1.set_field(self.user, 'district', "Kigali")
self.c2.set_field(self.user, 'state', "Kigali City")
self.c2.set_field(self.user, 'district', "Kigali")
self.run_color_gender_flow(self.c1, "red", "male", "16")
self.run_color_gender_flow(self.c2, "blue", "female", "19")
self.run_color_gender_flow(self.c3, "green", "male", "75")
self.run_color_gender_flow(self.c4, "maroon", "female", "50")
# create a group of the women
ladies = self.create_group("Ladies", [self.c2, self.c4])
# get our rulesets
color = RuleSet.objects.get(flow=self.flow, label="Color")
gender = RuleSet.objects.get(flow=self.flow, label="Gender")
age = RuleSet.objects.get(flow=self.flow, label="Age")
# categories should be in the same order as our rules, should have correct counts
result = Value.get_value_summary(ruleset=color)[0]
self.assertEquals(3, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Red", 2)
self.assertResult(result, 1, "Blue", 1)
self.assertResult(result, 2, "Green", 1)
# check our age category as well
result = Value.get_value_summary(ruleset=age)[0]
self.assertEquals(3, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Child", 1)
self.assertResult(result, 1, "Adult", 2)
self.assertResult(result, 2, "Senior", 1)
# and our gender categories
result = Value.get_value_summary(ruleset=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Male", 2)
self.assertResult(result, 1, "Female", 2)
# now filter the results and only get responses by men
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"])])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 0)
self.assertResult(result, 2, "Green", 1)
# what about men that are adults?
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"]),
dict(ruleset=age.pk, categories=["Adult"])])[0]
self.assertResult(result, 0, "Red", 0)
self.assertResult(result, 1, "Blue", 0)
self.assertResult(result, 2, "Green", 0)
# union of all genders
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male", "Female"]),
dict(ruleset=age.pk, categories=["Adult"])])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 1)
self.assertResult(result, 2, "Green", 0)
# just women adults by group
result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 1)
self.assertResult(result, 2, "Green", 0)
# remove one of the women from the group
ladies.update_contacts(self.user, [self.c2], False)
# get a new summary
result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 0)
self.assertResult(result, 2, "Green", 0)
# ok, back in she goes
ladies.update_contacts(self.user, [self.c2], True)
# do another run for contact 1
run5 = self.run_color_gender_flow(self.c1, "blue", "male", "16")
# totals should reflect the new value, not the old
result = Value.get_value_summary(ruleset=color)[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 2)
self.assertResult(result, 2, "Green", 1)
# what if we do a partial run?
self.send_message(self.flow, "red", contact=self.c1, restart_participants=True)
# should change our male/female breakdown since c1 now no longer has a gender
result = Value.get_value_summary(ruleset=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertResult(result, 0, "Male", 1)
self.assertResult(result, 1, "Female", 2)
# back to a full flow
run5 = self.run_color_gender_flow(self.c1, "blue", "male", "16")
# ok, now segment by gender
result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(ruleset=gender.pk, categories=["Male", "Female"]))
male_result = result[0]
self.assertResult(male_result, 0, "Red", 0)
self.assertResult(male_result, 1, "Blue", 1)
self.assertResult(male_result, 2, "Green", 1)
female_result = result[1]
self.assertResult(female_result, 0, "Red", 1)
self.assertResult(female_result, 1, "Blue", 1)
self.assertResult(female_result, 2, "Green", 0)
# segment by gender again, but use the contact field to do so
result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(contact_field="Gender", values=["MALE", "Female"]))
male_result = result[0]
self.assertResult(male_result, 0, "Red", 0)
self.assertResult(male_result, 1, "Blue", 1)
self.assertResult(male_result, 2, "Green", 1)
female_result = result[1]
self.assertResult(female_result, 0, "Red", 1)
self.assertResult(female_result, 1, "Blue", 1)
self.assertResult(female_result, 2, "Green", 0)
# add in a filter at the same time
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=color.pk, categories=["Blue"])],
segment=dict(ruleset=gender.pk, categories=["Male", "Female"]))
male_result = result[0]
self.assertResult(male_result, 0, "Red", 0)
self.assertResult(male_result, 1, "Blue", 1)
self.assertResult(male_result, 2, "Green", 0)
female_result = result[1]
self.assertResult(female_result, 0, "Red", 0)
self.assertResult(female_result, 1, "Blue", 1)
self.assertResult(female_result, 2, "Green", 0)
# ok, try segmenting by location instead
result = Value.get_value_summary(ruleset=color, segment=dict(location="State"))
eastern_result = result[0]
self.assertEquals('171591', eastern_result['boundary'])
self.assertEquals('Eastern Province', eastern_result['label'])
self.assertResult(eastern_result, 0, "Red", 0)
self.assertResult(eastern_result, 1, "Blue", 0)
self.assertResult(eastern_result, 2, "Green", 0)
kigali_result = result[1]
self.assertEquals('1708283', kigali_result['boundary'])
self.assertEquals('Kigali City', kigali_result['label'])
self.assertResult(kigali_result, 0, "Red", 0)
self.assertResult(kigali_result, 1, "Blue", 2)
self.assertResult(kigali_result, 2, "Green", 0)
# updating state location leads to updated data
self.c2.set_field(self.user, 'state', "Eastern Province")
result = Value.get_value_summary(ruleset=color, segment=dict(location="State"))
eastern_result = result[0]
self.assertEquals('171591', eastern_result['boundary'])
self.assertEquals('Eastern Province', eastern_result['label'])
self.assertResult(eastern_result, 0, "Red", 0)
self.assertResult(eastern_result, 1, "Blue", 1)
self.assertResult(eastern_result, 2, "Green", 0)
kigali_result = result[1]
self.assertEquals('1708283', kigali_result['boundary'])
self.assertEquals('Kigali City', kigali_result['label'])
self.assertResult(kigali_result, 0, "Red", 0)
self.assertResult(kigali_result, 1, "Blue", 1)
self.assertResult(kigali_result, 2, "Green", 0)
# segment by district instead
result = Value.get_value_summary(ruleset=color, segment=dict(parent="1708283", location="District"))
        # only one district in Kigali
self.assertEquals(1, len(result))
kigali_result = result[0]
self.assertEquals('60485579', kigali_result['boundary'])
self.assertEquals('Kigali', kigali_result['label'])
self.assertResult(kigali_result, 0, "Red", 0)
self.assertResult(kigali_result, 1, "Blue", 2)
self.assertResult(kigali_result, 2, "Green", 0)
# do a sanity check on our choropleth view
self.login(self.admin)
response = self.client.get(reverse('flows.ruleset_choropleth', args=[color.pk]) +
"?_format=json&boundary=" + self.org.country.osm_id)
# response should be valid json
response = json.loads(response.content)
# should have breaks
self.assertTrue('breaks' in response)
# should have two categories, Blue and Others
self.assertEquals(2, len(response['categories']))
self.assertEquals("Blue", response['categories'][0])
self.assertEquals("Others", response['categories'][1])
# assert our kigali result
kigali_result = response['scores']['1708283']
self.assertEquals(1, kigali_result['score'])
self.assertEquals("Kigali City", kigali_result['name'])
self.assertEquals("Blue", kigali_result['results'][0]['label'])
self.assertEquals("Others", kigali_result['results'][1]['label'])
self.assertEquals(1, kigali_result['results'][0]['count'])
self.assertEquals(0, kigali_result['results'][1]['count'])
self.assertEquals(100, kigali_result['results'][0]['percentage'])
self.assertEquals(0, kigali_result['results'][1]['percentage'])
with patch('temba.values.models.Value.get_value_summary') as mock:
mock.return_value = []
response = self.client.get(reverse('flows.ruleset_choropleth', args=[color.pk]) +
"?_format=json&boundary=" + self.org.country.osm_id)
# response should be valid json
response = json.loads(response.content)
# should have two categories, Blue and Others
self.assertEquals(2, len(response['categories']))
self.assertEquals("", response['categories'][0])
self.assertEquals("", response['categories'][1])
# all counts and percentage are 0
self.assertEquals(0, response['totals']['count'])
self.assertEquals(0, response['totals']['results'][0]['count'])
self.assertEquals(0, response['totals']['results'][0]['percentage'])
self.assertEquals(0, response['totals']['results'][1]['count'])
self.assertEquals(0, response['totals']['results'][1]['percentage'])
# and empty string labels
self.assertEquals("", response['totals']['results'][0]['label'])
self.assertEquals("", response['totals']['results'][1]['label'])
# also check our analytics view
response = self.client.get(reverse('flows.ruleset_analytics'))
# make sure we have only one flow in it
flows = json.loads(response.context['flows'])
self.assertEquals(1, len(flows))
self.assertEquals(3, len(flows[0]['rules']))
def test_open_ended_word_frequencies(self):
flow = self.get_flow('random_word')
def run_flow(contact, word):
self.assertEquals("Thank you", self.send_message(flow, word, contact=contact, restart_participants=True))
(c1, c2, c3, c4, c5, c6) = (self.create_contact("Contact1", '0788111111'),
self.create_contact("Contact2", '0788222222'),
self.create_contact("Contact3", '0788333333'),
self.create_contact("Contact4", '0788444444'),
self.create_contact("Contact5", '0788555555'),
self.create_contact("Contact6", '0788666666', is_test=True))
run_flow(c1, "1 better place")
run_flow(c2, "the great coffee")
run_flow(c3, "1 cup of black tea")
run_flow(c4, "awesome than this encore")
run_flow(c5, "from an awesome place in kigali")
run_flow(c6, "awesome coffee")
random = RuleSet.objects.get(flow=flow, label="Random")
result = Value.get_value_summary(ruleset=random)[0]
self.assertEquals(10, len(result['categories']))
self.assertTrue(result['open_ended'])
self.assertResult(result, 0, "awesome", 2)
self.assertResult(result, 1, "place", 2)
self.assertResult(result, 2, "better", 1)
self.assertResult(result, 3, "black", 1)
self.assertResult(result, 4, "coffee", 1)
self.assertResult(result, 5, "cup", 1)
self.assertResult(result, 6, "encore", 1)
self.assertResult(result, 7, "great", 1)
self.assertResult(result, 8, "kigali", 1)
self.assertResult(result, 9, "tea", 1)
# add French to org languages
Language.create(self.org, self.admin, 'French', 'fre')
# make sure we cleared the cache
Value.invalidate_cache(ruleset=random)
# encore is a french stop word and should not be included this time
result = Value.get_value_summary(ruleset=random)[0]
self.assertEquals(9, len(result['categories']))
self.assertTrue(result['open_ended'])
self.assertResult(result, 0, "awesome", 2)
self.assertResult(result, 1, "place", 2)
self.assertResult(result, 2, "better", 1)
self.assertResult(result, 3, "black", 1)
self.assertResult(result, 4, "coffee", 1)
self.assertResult(result, 5, "cup", 1)
self.assertResult(result, 6, "great", 1)
self.assertResult(result, 7, "kigali", 1)
self.assertResult(result, 8, "tea", 1)
|
reyrodrigues/EU-SMS
|
temba/values/tests.py
|
Python
|
agpl-3.0
| 20,151
| 0.00263
|
# written in python 3.6.1
#-*- coding: utf-8 -*-
from urllib.request import urlopen
import json
import string
import re
from bs4 import BeautifulSoup
import logging
import time
FILE_PATH = "./boxofficemojo.com/movie_data.txt"
LOG_PATH = "./boxofficemojo.com/scraping.log"
logging.basicConfig(filename=LOG_PATH,level=logging.DEBUG)
Keys = ["Name", "URL", "Genre","Runtime", "Rating", "MovieRanking"
, "PercentageofTotalGross", "WidestRelease", "CloseDate", "InRelease", "TotalGross"
, "Distributor", "Budget", "Domestic_Gross", "Domestic_Percentage"
, "Foreign_Gross", "Foreign_Percentage", "Worldwide_Gross", "OpeningWeekend"
, "Countryclicktoviewweekendbreakdown", "Dist", "ReleaseDate"
, "OpeningWknd", "ofTotal", "TotalGross", "AsOf"]
def add_empty_data(arrData, count):
for i in range(0,count):
arrData.append(" ")
return arrData
def remove_special_chars(dictData):
newDict= {}
for key in dictData:
new_key= re.sub(r'\W+', '', key)
newDict[new_key] = dictData[key]
return newDict
def save_to_json(filePath, dictData, countriesData=None):
dictData = remove_special_chars(dictData)
    if countriesData:
        countriesData = remove_special_chars(countriesData)
        merged = dict(dictData)
        merged.update(countriesData)
        dictData = merged
with open(filePath, "a") as outfile:
json.dump(dictData, outfile, ensure_ascii=False)
def write_header(filePath):
# Write a header
text_file = open(filePath, "ab")
for header in Keys:
text_file.write((header + u"|").encode('utf-8'))
text_file.write("\n".encode('utf-8'))
text_file.close()
def save_to_file(filePath, dictData, countriesData=None):
dictData = remove_special_chars(dictData)
    if countriesData:
        countriesData = remove_special_chars(countriesData)
        merged = dict(dictData)
        merged.update(countriesData)
        dictData = merged
Arranged= []
add_empty_data(Arranged, 50)
text_file = open(filePath, "ab")
for key, value in dictData.items():
for i ,k in enumerate(Keys):
if key == k:
Arranged[i]= value
for data in Arranged:
text_file.write((data + u"|").encode('utf-8'))
text_file.write("\n".encode('utf-8'))
text_file.close()
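# Hedged usage sketch (not part of the original scraper): save_to_file lines the dict
# values up in the Keys order defined above and appends them as one pipe-delimited row.
# The movie data below is illustrative only.
def _example_save_row():
    write_header(FILE_PATH)
    save_to_file(FILE_PATH, {"Name": "Example Movie", "Genre": "Drama",
                             "Total Gross": "$1,000,000"})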
def get_total_lifetime_grosses(link, arrData):
url = "http://www.boxofficemojo.com"+ link
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
# Assume that domestic info is from USA
arrData['Countryclicktoviewweekendbreakdown']= "USA"
#print(main_tbl)
tables = soup.find_all('table', attrs={'border': '0' , 'cellspacing':'0', 'cellpadding':'0' , 'width':'100%'})
#print( len(tables))
#td_count = 9
if len(tables) == 4:
#print(tables[3]) # Total lifetime grosses
        mp_boxes= tables[3].find_all("div", {"class": "mp_box_tab"})
a= len(mp_boxes)
for box in mp_boxes:
if(box.text == "Total Lifetime Grosses"):
div_content= box.findNext('div')
trs = div_content.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
if len(tds) == 3:
if tds[0].text.strip() == "Domestic:":
arrData["Total Gross"] = tds[1].text.strip()
arrData["% ofTotal"] = tds[2].text.strip()
arrData[tds[0].text.strip()+"_Gross"] = tds[1].text.strip()
arrData[tds[0].text.strip()+"_Percentage"] = tds[2].text.strip()
if(box.text == "Domestic Summary"):
div_content = box.findNext('div')
DS_tables = div_content.find_all('table', attrs = { 'border': '0' , 'cellspacing':'0', 'cellpadding':'0'})
for DS_table in DS_tables:
DS_trs = DS_table.find_all('tr')
for DS_tr in DS_trs:
DS_tr_title = DS_tr.td.text.strip()
if(DS_tr_title == "Opening\xa0Weekend:") or (DS_tr_title == "Opening Weekend:"):
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData["Opening Weekend"] = DS_tr_content.text.strip()
arrData["OpeningWknd"] = DS_tr_content.text.strip()
elif "(#" in DS_tr_title:
arrData['Movie Ranking'] = DS_tr_title
elif "%\xa0of\xa0Total\xa0Gross" in DS_tr_title or "% of Total Gross" in DS_tr_title:
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Percentage of Total Gross'] = DS_tr_content.text.strip()
elif DS_tr_title == "Widest\xa0Release:" or DS_tr_title == "Widest Release:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Widest Release'] = DS_tr_content.text.strip() # 14.
elif DS_tr_title == "Close\xa0Date:" or DS_tr_title == "Close Date:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Close Date'] = DS_tr_content.text.strip() # 15.
elif DS_tr_title == "In\xa0Release:" or DS_tr_title == "In Release:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['In Release'] = DS_tr_content.text.strip() # 15.
if(box.text == "The Players"):
#print(box.findNext('div'))
pass
return arrData
def get_movie_foreign(link, arrData):
try:
eachCountry = {}
ColumnHeaders= []
url = "http://www.boxofficemojo.com"+ link + "&page=intl"
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
contents = soup.find('table', attrs={'border': '3' , 'cellspacing':'0', 'cellpadding':'5', 'align':'center', 'style':'margin-top: 5px;'})
if len(contents) == 1:
#print(contents)
intl_table = contents.tr.td.table
if intl_table:
trs = intl_table.find_all("tr")
if len(trs) == 3:
#print ("no data")
temp= 0
else:
for row,tr in enumerate(trs):
if row == 0:
tds= tr.find_all("td") # get each header's text
for td in tds:
header= td.text.strip()
if "/" in header:
divided_header = header.split('/')
ColumnHeaders.append(divided_header[0])
ColumnHeaders.append(divided_header[1])
else:
ColumnHeaders.append(td.text.strip())
                        if(row < 3): # don't save unnecessary data
continue
tds= tr.find_all("td")
for column, td in enumerate(tds):
# 11. Country, 12.Dist, 13. Release Date, 14.OW, 15.% of Total, 16.Total gross, 17. as of
eachCountry[ColumnHeaders[column]] = td.text.strip()
save_to_file(FILE_PATH, arrData, eachCountry)
#save_to_json(FILE_PATH, arrData, eachCountry)
eachCountry.clear()
return arrData
except Exception as e:
logging.exception(e)
return arrData
def get_movie_detail(movies_list, link, arrData):
if link not in movies_list:
movies_list.append(link)
url = "http://www.boxofficemojo.com"+ link # 1. URL
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
contents= soup.find('table', attrs={'border': '0' , 'cellspacing':'1', 'cellpadding':'4' , 'bgcolor':'#dcdcdc', 'width':'95%'})
tabledata = contents.find_all("td")
name_table = soup.find('table', attrs={'border': '0' , 'cellspacing':'0', 'cellpadding':'0' , 'width':'100%', 'style':'padding-top: 5px;'})
name = name_table.font.b.getText() # 0. Name
# 2. Distributor, 3. Release Date, 4. Genre, 5. Runtime, 6. Rating, 7. Budget, 8. TotalGross
arrData['Name'] = name
arrData['URL'] = url
if len(tabledata) == 6:
Distributor = tabledata[0].b.getText()
ReleaseDate = tabledata[1].b.getText()
Genre = tabledata[2].b.getText()
Runtime = tabledata[3].b.getText()
Rating = tabledata[4].b.getText()
Budget = tabledata[5].b.getText()
arrData['Distributor'] = Distributor
arrData['ReleaseDate'] = ReleaseDate
arrData['Genre'] = Genre
arrData['Runtime'] = Runtime
arrData['Rating'] = Rating
arrData['Budget'] = Budget
#arrData.extend([name , url , Distributor, ReleaseDate,Genre ,Runtime , Rating,Budget])
#add_empty_data(arrData, 1) # match gap for missing column
elif len(tabledata) == 7:
TotalGross = tabledata[0].b.getText()
Distributor = tabledata[1].b.getText()
ReleaseDate = tabledata[2].b.getText()
Genre = tabledata[3].b.getText()
Runtime = tabledata[4].b.getText()
Rating = tabledata[5].b.getText()
Budget = tabledata[6].b.getText()
arrData['TotalGross'] = TotalGross
arrData['Distributor'] = Distributor
arrData['ReleaseDate'] = ReleaseDate
arrData['Genre'] = Genre
arrData['Runtime'] = Runtime
arrData['Rating'] = Rating
arrData['Budget'] = Budget
#arrData.extend([ name , url , Distributor, ReleaseDate,Genre ,Runtime , Rating,Budget ,TotalGross])
#print (result)
#print contents2[0]
return arrData
def get_all_movies():
# Alphabet loop for how movies are indexed including
# movies that start with a special character or number
index = ["NUM"] + list(string.ascii_uppercase)
# List of movie urls
movies_list = []
# dict data
arrData = {}
startTime = time.time()
lapTime= 0.0
# if you want to jump directly to somewhere (Set None to be not skipped)
JumpTo = 'S'
IsJumpTarget = False
JumpToPage = 8
write_header(FILE_PATH)
logging.debug("running...start at : " + str(time.time()))
# Loop through the pages for each letter
for letter_idx, letter in enumerate(index):
if JumpTo:
indexOfTargetLetter = index.index(JumpTo)
if letter_idx < indexOfTargetLetter:
logging.debug("skip this letter")
IsJumpTarget= False
continue
elif letter_idx == indexOfTargetLetter:
IsJumpTarget= True
url = ("http://www.boxofficemojo.com/movies/alphabetical.htm?letter=" + letter)
page1 = urlopen(url)
soup1 = BeautifulSoup(page1, "lxml")
navi = soup1.find('div', attrs={"class" : "alpha-nav-holder"})
bs= navi.font.find_all('b')
count_bs= len(bs)
logging.debug("pages count : " + str(count_bs))
if letter == "NUM":
count_bs = 1
# Loop through the pages within each letter
for num in range(1, count_bs+1):
logging.debug("begin to scrap letter : " + letter + ", page : " + str(num))
if JumpToPage:
if num < JumpToPage and IsJumpTarget == True: # do not jump this page, if it's not target letter
logging.debug("skip this page")
continue
url = ("http://www.boxofficemojo.com/movies/alphabetical.htm?"
"letter=" + letter + "&page=" + str(num))
try:
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
rows = soup.find(id="body").find("table").find("table").find_all(
"table")[1].find_all("tr")
# skip index row
if len(rows) > 1:
counter = 1
for row in rows:
trackingStartTime= time.time()
# skip index row
if counter > 1:
link = row.td.font.a['href']
arrData = get_movie_detail(movies_list, link, arrData)
arrData = get_movie_foreign(link, arrData)
arrData = get_total_lifetime_grosses(link, arrData)
save_to_file(FILE_PATH, arrData)
arrData.clear()
lapTime= time.time() - trackingStartTime
logging.debug("each movie's lapTime : " + str(lapTime))
counter += 1
except Exception as e:
logging.exception(e)
TotalElaspedTime= (time.time() - startTime)
logging.debug('done.' + str(TotalElaspedTime))
get_all_movies()
|
softinus/Movie_DataMiner
|
boxofficemojo.com/BoxOfficeMojo_Scraping_Code.py
|
Python
|
apache-2.0
| 13,739
| 0.011427
|
import booby
from booby import fields
from booby.inspection import get_fields, is_model
from booby.validators import Required
from pydoc import locate
from collections import OrderedDict
from tabulate import tabulate
import readline
MODEL_MAP = {}
class tabCompleter(object):
"""
A tab completer that can either complete from
the filesystem or from a list.
Partially taken from:
http://stackoverflow.com/questions/5637124/tab-completion-in-pythons-raw-input
"""
def createListCompleter(self, ll):
"""
This is a closure that creates a method that autocompletes from
the given list.
Since the autocomplete function can't be given a list to complete from
a closure is used to create the listCompleter function with a list to complete
from.
"""
def listCompleter(text, state):
line = readline.get_line_buffer()
if not line:
return [c + " " for c in ll][state]
else:
return [c + " " for c in ll if c.startswith(line)][state]
self.listCompleter = listCompleter
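# Hedged usage sketch (not part of the original module): one way the closure-based
# completer above might be hooked into readline. The command list is illustrative only.
def _example_list_completion():
    completer = tabCompleter()
    completer.createListCompleter(["create", "edit", "list", "quit"])
    readline.set_completer_delims('\t')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(completer.listCompleter)
    return raw_input("command> ")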
def ensure_json_value(value):
if is_model(value):
return dict(value)
else:
return value
def ensure_json(value):
if isinstance(value, (list, tuple)):
return [ensure_json_value(w) for w in value]
else:
return ensure_json_value(value)
class EditModel(object):
def __init__(self, model_type, current_value, help_map):
self.model_type = model_type
self.current_value = current_value
self.new_value = {}
self.help_map = help_map
def get_fields(self):
required_details = OrderedDict()
non_required_details = OrderedDict()
for k, f in sorted(get_fields(self.model_type).iteritems()):
if is_required(f):
required_details[k] = f
else:
non_required_details[k] = f
details = OrderedDict()
for k, f in required_details.iteritems():
details[k] = f
for k, f in non_required_details.iteritems():
details[k] = f
return details
def edit_field(self, field_name):
new_field_value = self.ask_field(field_name)
# field = get_fields(self.current_value).get(field_name)
value = ensure_json(new_field_value)
self.new_value[field_name] = value
def ask_field(self, field_name):
field_type = self.model_type.__dict__.get(field_name, None)
        if not field_type:
            print "No field of that name."
            return None
new_value = ask_detail_for_field(
field_name, field_type, None, self.help_map)
if is_model(new_value):
new_value = new_value.to_json()
return new_value
def print_current(self):
fields = self.get_fields()
table = []
i = 1
for k, v in fields.iteritems():
value = getattr(self.current_value, k, None)
row = [k, convert_for_print(value)]
table.append(row)
i = i + 1
print tabulate(table)
def print_new(self):
print self.new_value
def convert_value_to_print(value):
f = getattr(value, 'to_json', None)
if callable(f):
value = value.to_json()
return value
def convert_for_print(value):
if isinstance(value, (list, tuple)):
if len(value) > 0:
value = (convert_value_to_print(w) for w in value)
value = "[" + ", ".join(value) + "]"
else:
value = ""
else:
value = convert_value_to_print(value)
return value
def get_type(model):
if type(model) == fields.Integer or model == fields.Integer:
return 'Integer'
elif type(model) == fields.String or model == fields.String:
return 'String'
else:
return model.__name__
def is_required(field):
return next((True for x in field.validators if isinstance(x, Required)), False)
def convert_to_proper_base_type(base_type, value):
'''
Converts the string input in the appropriate value type.
'''
if get_type(base_type) == 'Integer':
return int(value)
elif get_type(base_type) == 'String':
return value
elif get_type(base_type) == 'Boolean':
return bool(value)
else:
return value
def edit_details_for_type(model_type, old_object, help_map={}):
'''
Asks for user input to change an existing model.
'''
m = EditModel(model_type, old_object, help_map)
print
print "Current values:"
print
m.print_current()
print
selection = "xxx"
print
print "Caution: the new value will replace the old value, not be added to it."
print
while selection:
selection = raw_input("field to edit ('enter' to finish): ")
if selection:
print
m.edit_field(selection)
print
return m.new_value
def ask_details_for_type(model_type, ask_only_required=True, help_map={}):
'''
Asks for user input to create an object of a specified type.
If the type is registered in a model/builder map, the function associated
with this type is used to create the object instead of the auto-generated
query.
'''
if MODEL_MAP.get(model_type, None):
func = MODEL_MAP[model_type]
return func()
required_details = OrderedDict()
non_required_details = OrderedDict()
values = {}
for k, f in sorted(get_fields(model_type).iteritems()):
if is_required(f):
required_details[k] = f
else:
non_required_details[k] = f
print
print "Enter values for fields below. Enter '?' or '? arg1 [arg2]' for help for each field."
print
print "Required fields:"
print "----------------"
print
for k, f in required_details.iteritems():
while True:
value = ask_detail_for_field(k, f, ask_only_required, help_map)
if value:
values[k] = value
break
else:
print
print "This is a required field, please enter value for {}.".format(k)
print
if not ask_only_required:
print
print "Optional fields, press 'Enter' to ignore a field."
print "-------------------------------------------------"
print
for k, f in non_required_details.iteritems():
value = ask_detail_for_field(k, f, ask_only_required, help_map)
if value:
values[k] = value
print
obj = model_type(**values)
return obj
def ask_collection_detail(name, detail_type, ask_only_required=True, help_map={}):
result = []
print "Enter details for '{}', multiple entries possible, press enter to continue to next field.".format(name)
print
while True:
cd = ask_detail_for_field(
name, detail_type, ask_only_required, help_map)
if not cd:
break
else:
result.append(cd)
return result
def parse_for_help(answer, help_func):
if answer.startswith('?'):
args = answer.split(' ')[1:]
if not help_func:
print 'Sorry, no help available for this field.'
else:
print
help_func(*args)
print
return True
else:
return False
def ask_simple_field(name, field_type, help_map={}):
type_name = get_type(field_type)
answer = raw_input(" - {} ({}): ".format(name, type_name))
if not answer:
return None
if parse_for_help(answer, help_map.get(name, None)):
return ask_simple_field(name, field_type, help_map)
try:
value = convert_to_proper_base_type(field_type, answer)
except Exception as e:
print "Can't convert input: ", e
return ask_simple_field(name, field_type, help_map)
return value
def ask_detail_for_field(name, detail_type, ask_only_required=True, help_map={}):
value = None
if MODEL_MAP.get(type(detail_type), None):
func = MODEL_MAP[type(detail_type)]
value = func()
return value
# collections are a special case
if type(detail_type) == booby.fields.Collection:
# collection
value = ask_collection_detail(
name, detail_type.model, ask_only_required, help_map)
elif is_model(detail_type):
# collection, and model field
value = ask_details_for_type(detail_type, ask_only_required, help_map)
elif issubclass(type(detail_type), booby.fields.Field):
# non-collection, and non-model field
value = ask_simple_field(name, type(detail_type), help_map)
elif issubclass(detail_type, booby.fields.Field):
# collection, and non-model field
value = ask_simple_field(name, detail_type, help_map)
return value
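# Hedged usage sketch (not part of the original module): driving ask_details_for_type
# with a small, hypothetical booby model. The Server model and the host_help function
# exist only for illustration.
def _example_ask_for_server():
    class Server(booby.Model):
        host = fields.String(required=True)
        port = fields.Integer()
    def host_help(*args):
        print "Hostname or IP address of the server to register."
    # Prompts for the required 'host' first, then the optional 'port',
    # and returns a populated Server instance.
    return ask_details_for_type(Server, ask_only_required=False,
                                help_map={'host': host_help})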
|
makkus/pyclist
|
pyclist/model_helpers.py
|
Python
|
apache-2.0
| 8,972
| 0.000892
|
import urllib2, json, time, sys
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", dest="fahrenheit", action="store", default=False, type="string", help="Convert to FAHRENHEIT")
parser.add_option("-e", dest="end", action="store", default=False, type="string", help="END date")
parser.add_option("-s", dest="start", action="store", default=False, type="string", help="START date")
parser.add_option("-t", dest="token", action="store", default=False, type="string", help="Weather Underground TOKEN")
(options, args) = parser.parse_args()
if options.token:
token = options.token
else:
parser.print_help()
sys.exit()
if options.start:
start = options.start
else:
parser.print_help()
sys.exit()
if options.end:
end = options.end
else:
parser.print_help()
sys.exit()
if options.fahrenheit:
fahrenheit = True
else:
fahrenheit = False
start = datetime.strptime(start,'%Y-%m-%d')
end = datetime.strptime(end,'%Y-%m-%d')
url = ""
if end < start:
print "Error: end date " + str(end) + " occurs before start date " + str(start)
sys.exit()
for dt in rrule(DAILY, dtstart=start, until=end):
total = 0.0
temp = 0.0
count = 0
wunderground_url ="http://api.wunderground.com/api/" + token + "/history_" + dt.strftime("%Y%m%d") +"/q/NY/New_York_City.json"
try:
url = urllib2.urlopen(wunderground_url)
parsed_json = json.loads(url.read())
except:
print "Error reading URL " + wunderground_url
print "Is your token correct?"
sys.exit()
try:
for mean in parsed_json['history']['observations']:
if fahrenheit:
total += float(mean['tempi'])
else:
total += float(mean['tempm'])
count += 1
temp = (total / count)
print dt.strftime("%Y-%m-%d") + "," + str(temp)
except:
print "Error retrieving temperature records for start date " + str(start) + " end date " + str(end)
url.close()
time.sleep(10)
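# Hedged usage sketch (not part of the original script); the token is a placeholder:
#   python wunderground.py -t YOUR_WUNDERGROUND_TOKEN -s 2015-01-01 -e 2015-01-07
# Prints one "YYYY-MM-DD,mean_temperature" line per day in the requested range
# (add "-f true" to average the Fahrenheit readings instead of Celsius).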
|
heatseeknyc/data-science
|
src/wunderground.py
|
Python
|
mit
| 2,014
| 0.036743
|
import logging
def init_logger():
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
logger = logging.getLogger('redberry')
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setFormatter(formatter)
logger.addHandler(console)
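# Hedged usage sketch (not part of the original module): once init_logger() has run,
# the configured logger can be fetched by name anywhere in the application.
def _example_logging():
    init_logger()
    log = logging.getLogger('redberry')
    log.debug("cache warmed")  # illustrative message only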
|
michaelcho/redberry
|
redberry/utils/logger.py
|
Python
|
apache-2.0
| 327
| 0.003058
|
from __future__ import absolute_import, unicode_literals
import pytest
from case import Mock, patch
from vine import promise
from amqp.abstract_channel import AbstractChannel
from amqp.exceptions import AMQPNotImplementedError, RecoverableConnectionError
from amqp.serialization import dumps
class test_AbstractChannel:
class Channel(AbstractChannel):
def _setup_listeners(self):
pass
@pytest.fixture(autouse=True)
def setup_conn(self):
self.conn = Mock(name='connection')
self.conn.channels = {}
self.channel_id = 1
self.c = self.Channel(self.conn, self.channel_id)
self.method = Mock(name='method')
self.content = Mock(name='content')
self.content.content_encoding = 'utf-8'
self.c._METHODS = {(50, 61): self.method}
def test_enter_exit(self):
self.c.close = Mock(name='close')
with self.c:
pass
self.c.close.assert_called_with()
def test_send_method(self):
self.c.send_method((50, 60), 'iB', (30, 0))
self.conn.frame_writer.assert_called_with(
1, self.channel_id, (50, 60), dumps('iB', (30, 0)), None,
)
def test_send_method__callback(self):
callback = Mock(name='callback')
p = promise(callback)
self.c.send_method((50, 60), 'iB', (30, 0), callback=p)
callback.assert_called_with()
def test_send_method__wait(self):
self.c.wait = Mock(name='wait')
self.c.send_method((50, 60), 'iB', (30, 0), wait=(50, 61))
self.c.wait.assert_called_with((50, 61), returns_tuple=False)
def test_send_method__no_connection(self):
self.c.connection = None
with pytest.raises(RecoverableConnectionError):
self.c.send_method((50, 60))
def test_send_method__connection_dropped(self):
self.c.connection.frame_writer.side_effect = StopIteration
with pytest.raises(RecoverableConnectionError):
self.c.send_method((50, 60))
def test_close(self):
with pytest.raises(NotImplementedError):
self.c.close()
def test_wait(self):
with patch('amqp.abstract_channel.ensure_promise') as ensure_promise:
p = ensure_promise.return_value
p.ready = False
def on_drain(*args, **kwargs):
p.ready = True
self.conn.drain_events.side_effect = on_drain
p.value = (1,), {'arg': 2}
self.c.wait((50, 61), timeout=1)
self.conn.drain_events.assert_called_with(timeout=1)
prev = self.c._pending[(50, 61)] = Mock(name='p2')
p.value = None
self.c.wait([(50, 61)])
assert self.c._pending[(50, 61)] is prev
def test_dispatch_method__content_encoding(self):
self.c.auto_decode = True
self.method.args = None
self.c.dispatch_method((50, 61), 'payload', self.content)
self.content.body.decode.side_effect = KeyError()
self.c.dispatch_method((50, 61), 'payload', self.content)
def test_dispatch_method__unknown_method(self):
with pytest.raises(AMQPNotImplementedError):
self.c.dispatch_method((100, 131), 'payload', self.content)
def test_dispatch_method__one_shot(self):
self.method.args = None
p = self.c._pending[(50, 61)] = Mock(name='oneshot')
self.c.dispatch_method((50, 61), 'payload', self.content)
p.assert_called_with((50, 61), self.content)
def test_dispatch_method__one_shot_no_content(self):
self.method.args = None
self.method.content = None
p = self.c._pending[(50, 61)] = Mock(name='oneshot')
self.c.dispatch_method((50, 61), 'payload', self.content)
p.assert_called_with((50, 61))
assert not self.c._pending
def test_dispatch_method__listeners(self):
with patch('amqp.abstract_channel.loads') as loads:
loads.return_value = [1, 2, 3], 'foo'
p = self.c._callbacks[(50, 61)] = Mock(name='p')
self.c.dispatch_method((50, 61), 'payload', self.content)
p.assert_called_with(1, 2, 3, self.content)
def test_dispatch_method__listeners_and_one_shot(self):
with patch('amqp.abstract_channel.loads') as loads:
loads.return_value = [1, 2, 3], 'foo'
p1 = self.c._callbacks[(50, 61)] = Mock(name='p')
p2 = self.c._pending[(50, 61)] = Mock(name='oneshot')
self.c.dispatch_method((50, 61), 'payload', self.content)
p1.assert_called_with(1, 2, 3, self.content)
p2.assert_called_with((50, 61), 1, 2, 3, self.content)
assert not self.c._pending
assert self.c._callbacks[(50, 61)]
|
pexip/os-python-amqp
|
t/unit/test_abstract_channel.py
|
Python
|
lgpl-2.1
| 4,759
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-24 07:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appauth', '0016_userprofile_numq'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='exp_data',
field=models.TextField(default='{}'),
),
]
|
PrefPy/opra
|
compsocsite/appauth/migrations/0017_userprofile_exp_data.py
|
Python
|
mit
| 453
| 0
|
#!/usr/bin/python
# vim: set expandtab tabstop=4 shiftwidth=4:
# -*- coding: utf-8 -*-
# gen_cacert <http://rhizomatik.net/>
# Python functions to generate an X509 CA certificate
#
# Copyright (C) 2010 duy at rhizomatik dot net
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""
gen_cacert
Python functions to generate an X509 CA certificate.
Usage: execute ./gen_cacert -h
@author: duy
@organization: rhizomatik labs
@copyright: author
@license: GNU GPL version 3 or any later version
(details at http://www.gnu.org)
@contact: duy at rhizomatik dot net
@dependencies: python (>= version 2.5)
@change log:
@TODO:
    * Get an error/warning when one of the main parameters contains a space,
      causing that parameter and the following ones to be ignored
    * Add a parameter for the certificate serial path
"""
__app__ = "gen_cacert"
__author__ = "duy"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2010 duy"
__date__ = "2010/03/01"
__license__ = " GNU GPL version 3 or any later version (details at http://www.gnu.org)"
__credits__ = ""
from xmpp_foaf_cert import *
import sys
import getopt
DEBUG = True
## ----------------------------------------------------------------------
## administrative functions
## ----------------------------------------------------------------------
def _usage():
print "Usage: %s options" % __app__
print """
Options:
-h, --help Print this usage message.
-d, --debug
-p, --certificate-path CA certificate path
-k, --certificate-key-path CA private key path
    -n, --commonname certificate commonName
-c, --country certificate countryName
-o, --organization certificate organizationName
    -u, --organizationalunit certificate organizationalUnitName
-e, --email certificate emailAddress
"""
def _version():
"""
Display a formatted version string for the module
"""
print """%(__app__)s %(__version__)s
%(__copyright__)s
released %(__date__)s
Thanks to:
%(__credits__)s""" % globals()
def main(argv):
"""
Create an x509 CA certificate and save it as PEM file
@param CN: certificate commonName
@param C: certificate countryName
@param O: certificate organizationName
@param OU: certificate organizationalUnitName
@param Email: certificate emailAddress
@type CN: string
@type C: string
@type O: string
@type OU: string
@type Email: string
@param cacert_path: CA certificate path
@param cakey_path: CA private key path
@type cacert_path: string
@type cakey_path: string
"""
short_opts = "hdp:k:n:c:o:u:e:"
long_opts = ["help","debug", "certificate-path=","certificate-key-path=","commonname=","country=","organization=","organizationalunit=","email="]
try:
opts, args = getopt.getopt(argv, short_opts, long_opts)
except getopt.GetoptError:
print "The CA certificate will be created with default values"
# _usage()
# sys.exit(0)
# Example default values
CN = "CA Certificate"
C = "CR"
O="Rhizomatik Labs"
OU="Mycelia project"
Email="ca@rhizomatik.net"
cacert_path='/tmp/xmpp_foaf_cacert.pem'
cakey_path='/tmp/xmpp_foaf_cakey.key'
for opt, arg in opts:
if opt in ("-h", "--help"):
_usage()
sys.exit(0)
elif opt in ("-p","--certificate-path"):
cacert_path = arg
elif opt in ("-k","--certificate-key-path"):
cakey_path = arg
        elif opt in ("-n","--commonname"):
CN = arg
elif opt in ("-c","--country"):
C = arg
elif opt in ("-o","--organization"):
O = arg
elif opt in ("-u","--organizationalunit"):
OU = arg
elif opt in ("-e","--email"):
Email = arg
if DEBUG:
print "CN: "+CN
print "C: "+C
print "O: "+O
print "OU: "+OU
print "Email: "+Email
mkcacert_save(cacert_path, cakey_path, CN, C, O, OU, Email)
if __name__ == "__main__":
main(sys.argv[1:])
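# Hedged usage sketch (not part of the original script); all paths and values below are
# placeholders:
#   ./gen_cacert.py -p /tmp/ca_cert.pem -k /tmp/ca_key.key -n "Example CA" \
#       -c US -o "Example Org" -u "Example Unit" -e ca@example.org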
|
duy/python-foafcert
|
foafcert/gen_cacert.py
|
Python
|
gpl-2.0
| 4,578
| 0.008301
|
import six
from unittest import TestCase
from dark.reads import Read, Reads
from dark.score import HigherIsBetterScore
from dark.hsp import HSP, LSP
from dark.alignments import (
Alignment, bestAlignment, ReadAlignments, ReadsAlignmentsParams,
ReadsAlignments)
class TestAlignment(TestCase):
"""
Tests for the dark.alignment.Alignment class
"""
def testExpectedAttrs(self):
"""
An alignment must have the expected attributes.
"""
alignment = Alignment(45, 'title')
self.assertEqual('title', alignment.subjectTitle)
self.assertEqual(45, alignment.subjectLength)
def testNoHspsWhenCreated(self):
"""
An alignment must have no HSPs when it is created.
"""
alignment = Alignment(45, 'title')
self.assertEqual(0, len(alignment.hsps))
def testAddHsp(self):
"""
It must be possible to add an HSP to an alignment.
"""
alignment = Alignment(45, 'title')
alignment.addHsp(HSP(3))
self.assertEqual(HSP(3), alignment.hsps[0])
class TestReadAlignments(TestCase):
"""
Tests for the dark.alignment.ReadAlignments class
"""
def testRead(self):
"""
        A ReadAlignments instance must store its read.
"""
read = Read('id', 'ACGT')
readAlignments = ReadAlignments(read)
self.assertEqual(read, readAlignments.read)
def testNoAlignments(self):
"""
        A ReadAlignments instance must be able to have no alignments.
"""
read = Read('id', 'ACGT')
readAlignments = ReadAlignments(read)
self.assertEqual(0, len(readAlignments))
def testAlignments(self):
"""
        A ReadAlignments instance must store its alignments.
"""
read = Read('id', 'ACGT')
alignment1 = Alignment(45, 'title1')
alignment2 = Alignment(55, 'title2')
readAlignments = ReadAlignments(read, [alignment1, alignment2])
self.assertEqual([alignment1, alignment2], readAlignments)
class TestBestAlignmentHSP(TestCase):
"""
    Test the L{dark.alignments.bestAlignment} function when HSPs are used.
"""
def testOneAlignment(self):
"""
When one alignment is present that alignment must be returned by
bestAlignment.
"""
alignment = Alignment(44, 'Seq 1')
alignment.addHsp(HSP(10))
alignment.addHsp(HSP(9))
alignments = [alignment]
hit = ReadAlignments(Read('id1', 'aaa'), alignments)
best = bestAlignment(hit)
self.assertEqual('Seq 1', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
def testThreeAlignments(self):
"""
When three alignments are present, the one with the highest first HSP
must be returned by bestAlignment.
"""
alignment1 = Alignment(33, 'Seq 1')
alignment1.addHsp(HSP(10))
alignment1.addHsp(HSP(9))
alignment2 = Alignment(44, 'Seq 2')
alignment2.addHsp(HSP(30))
alignment2.addHsp(HSP(29))
alignment3 = Alignment(55, 'Seq 3')
alignment3.addHsp(HSP(20))
alignment3.addHsp(HSP(19))
alignments = [alignment1, alignment2, alignment3]
hit = ReadAlignments(Read('id1', 'aaa'), alignments)
best = bestAlignment(hit)
self.assertEqual('Seq 2', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
class TestBestAlignmentLSP(TestCase):
"""
    Test the L{dark.alignments.bestAlignment} function when LSPs are used.
"""
def testOneAlignment(self):
"""
When one alignment is present that alignment must be returned by
bestAlignment.
"""
alignment = Alignment(44, 'Seq 1')
alignment.addHsp(LSP(10))
alignment.addHsp(LSP(9))
alignments = [alignment]
readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
best = bestAlignment(readAlignments)
self.assertEqual('Seq 1', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
def testThreeAlignments(self):
"""
When three alignments are present, the one with the lowest first HSP
must be returned by bestAlignment.
"""
alignment1 = Alignment(33, 'Seq 1')
alignment1.addHsp(LSP(10))
alignment1.addHsp(LSP(9))
alignment2 = Alignment(44, 'Seq 2')
alignment2.addHsp(LSP(3))
alignment2.addHsp(LSP(2))
alignment3 = Alignment(55, 'Seq 3')
alignment3.addHsp(LSP(20))
alignment3.addHsp(LSP(19))
alignments = [alignment1, alignment2, alignment3]
readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
best = bestAlignment(readAlignments)
self.assertEqual('Seq 2', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
class TestReadsAlignmentsParams(TestCase):
"""
Test the L{dark.alignments.ReadsAlignmentsParams} class.
"""
def testExpectedAttrs(self):
"""
A ReadsAlignmentsParams instance must have the expected attributes.
"""
applicationParams = {}
params = ReadsAlignmentsParams('application name', applicationParams,
False, 'Bit score')
self.assertEqual('application name', params.application)
self.assertIs(applicationParams, params.applicationParams)
self.assertFalse(params.subjectIsNucleotides)
self.assertEqual('Bit score', params.scoreTitle)
class TestReadsAlignments(TestCase):
"""
Test the L{dark.alignments.ReadsAlignments} class.
"""
# NOTE: The ReadsAlignments class is a base class for concrete
# implementations, such as BlastReadsAlignments. So it can only be
# tested minimally by itself. For full tests see the
# TestBlastReadsAlignments and TestBlastReadsAlignmentsFiltering
# classes in test/blast/blast_alignments.py
def testExpectedAttrs(self):
"""
A ReadsAlignments instance must have the expected attributes.
"""
reads = Reads()
params = {
'application': 'app name'
}
readsAlignments = ReadsAlignments(reads, params)
self.assertIs(readsAlignments.reads, reads)
self.assertEqual('app name', readsAlignments.params['application'])
self.assertIs(params, readsAlignments.params)
self.assertIs(HigherIsBetterScore, readsAlignments.scoreClass)
def testNotIterable(self):
"""
Iterating an empty ReadsAlignments must result in the empty list.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
self.assertEqual([], list(readsAlignments))
def testGetSubjectSequence(self):
"""
A ReadsAlignments instance will not implement getSubjectSequence.
Subclasses are expected to implement it.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
error = 'getSubjectSequence must be implemented by a subclass'
six.assertRaisesRegex(self, NotImplementedError, error,
readsAlignments.getSubjectSequence, 'title')
|
bamueh/dark-matter
|
test/test_alignments.py
|
Python
|
mit
| 7,278
| 0
|
import cv2
import logging
import numpy as np
import os
import sys
from lib.warping import ImageBlobWarping
from lib.singleton import Singleton
from logging import FileHandler, StreamHandler
from multiprocessing import Pool
from ocr import Ocr
from timeit import default_timer as timer
from vlogging import VisualRecord
from lib.timing import timing
logger = logging.getLogger(__name__)
"""
Process an image and tries to find a rectangle looking similar to an argentine license plate
and do ocr
"""
@Singleton
class PlateDetector(object):
def __init__(self):
self.vlogger = None
self.pre= None
self.edged= None
self.warp= ImageBlobWarping()
self.bnight= False
self.ocr_engine = Ocr('spa', logger)
def set_logger(self, logger):
self.vlogger = logger
def bestocr(self, ocrlst):
ocr_overall_acc_lst= []
imax= -1
if len(ocrlst) == 0:
return imax
        #~ print ocrlst
        for ocritm in ocrlst:
            # reset the accumulator for each OCR candidate
            ocr_acc= 0
            #~ print ocritm
for det in ocritm:
if det is not None and det[1] != None:
try:
ocr_acc = ocr_acc + det[1]**2
except:
pass
if len(ocritm) > 0:
ocr_acc /= len(ocritm)
ocr_acc= ocr_acc**0.5
print "ocr_acc: %.3f %%"%ocr_acc
ocr_overall_acc_lst.append(round(ocr_acc,3))
imax= max(ocr_overall_acc_lst)
return ocr_overall_acc_lst.index(imax)
""" Return best text recognized """
def first(self, img):
bbox= None
code= None
cnt= None
blobs= self.findBlobs( img )
ocrlst= []
bboxlst= []
for orig_rot_blob in blobs:
bb= np.int0(cv2.boxPoints( orig_rot_blob ))
bbox= cv2.boundingRect( bb )
w= bbox[2]
h= bbox[3]
if (w > 2*h) and (w > 80) and (w < 200): # this should be relative to image dimensions
code = self.ocr( orig_rot_blob )
if code:
ocrlst.append( code )
bboxlst.append( bbox )
print code
if len(code) == 6:
break
# hardcoded -- width should not be higher than img.width / 8
if (w > 2*h) and (w > 80) and (w < 400): # second stage without max size constraints
code = self.ocr( orig_rot_blob )
if code:
ocrlst.append( code )
bboxlst.append( bbox )
if len(code) == 6:
break
if len( ocrlst ) > 0:
ocr_best_index= self.bestocr( ocrlst )
if ocr_best_index != -1:
code = ocrlst[ ocr_best_index ]
bbox = bboxlst[ ocr_best_index ]
else:
print "none"
return code, bbox
def findBlobs(self, img):
rects= []
cnts= self.findContours(img)
for c in cnts:
c= c.reshape(-1, 2)
if len(c) < 4:
continue
arcl= cv2.arcLength(c, True)
approx= cv2.approxPolyDP(c, 0.02 * arcl, True)
approx= approx.reshape(-1, 2)
rect= cv2.minAreaRect(approx)
w, h= rect[1]
if len(approx) >= 4:
if (h > 0) and (w > h):
ratio = float(w) / h
if 2.4 < ratio < 4.2:
rects.append(rect)
return rects
def ocr(self, rect):
ang= rect[2]
w,h= rect[1]
if ang < -45:
ang= ang + 90
w= h
h= rect[1][0]
box= cv2.boxPoints(rect)
box= np.int0(box)
box= self.warp.order_points(box)
letters= []
code= []
try:
roic= self.warp.transform(self.edged, box)
roi= self.warp.transform(self.pre, box)
roi_orig= self.warp.transform(self.original_image, box)
except:
print "some error"
return code
(roich, roicw)= roic.shape[:2]
nh= 143
if roich > 200:
nw= (roicw * nh)/roich
roi= cv2.resize(roi,(nw, nh), interpolation= cv2.INTER_LINEAR)
roic= cv2.resize(roic,(nw, nh), interpolation= cv2.INTER_LINEAR)
#~ self.do_skeleton(roi)
image_rect= self.prepare_for_ocr(roi)
image_rect2= image_rect.copy()
if self.vlogger:
self.vlogger.debug(VisualRecord("candidate", [image_rect], fmt = "jpg"))
i, cnts, hie_letters= cv2.findContours(image_rect, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if self.vlogger:
self.vlogger.debug(VisualRecord("candidate after contours", [cv2.drawContours(roi_orig,cnts,-1,(0,255,0),1)], fmt = "jpg"))
h= roic.shape[0]
filtered_cnts= []
for i,b in enumerate(cnts):
hie_let= hie_letters[0][i]
# [next, previous, first_child, parent]
if hie_let[3] == -1: # if contour has no parent then continue with next
continue
c = b.reshape(-1,2)
if len(b) < 3: # ??
continue
r= cv2.boundingRect(c)
# pantentes.txt - las letras miden 3.2cm y la patente completa 29.4cm
if r[2] < (image_rect.shape[1] / 10):
continue
ratio= float(r[3]) / r[2]
if not 1.5 <= ratio <= 2.5:
continue
letters.append(r)
filtered_cnts.append(b)
if len(letters) >= 4:
for p in enumerate(sorted(letters, key= lambda b:b[0])):
code.append(self._do_ocr(image_rect2, p[1], p[0]))
if self.vlogger:
self.vlogger.debug(VisualRecord("LETTER DETECTION", [cv2.drawContours(image_rect2,filtered_cnts,-1,(0,255,0),1)], fmt = "jpg"))
return code
def _do_ocr(self, img, b, i):
x,y,w,h = b
l = cv2.copyMakeBorder(
img[y:y+h, x:x+w],
5, 5, 5, 5, cv2.BORDER_CONSTANT,
value=255)
if i > 2:
return self.ocr_engine.read_digit(l)
return self.ocr_engine.read_text(l)
def findContours(self, img):
imgcopy= img.copy()
if self.bnight:
i= self.prepare_night(img)
else:
i= self.prepare_day(img)
_,cnts, hie = cv2.findContours(i, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if self.vlogger:
if self.bnight:
self.vlogger.debug(VisualRecord("contours", [cv2.drawContours(imgcopy,cnts,-1, (80,255,80),2),i], fmt = "jpg"))
else:
self.vlogger.debug(VisualRecord("contours", [cv2.drawContours(imgcopy,cnts,-1, (255,120,120),2),i], fmt = "jpg"))
return cnts
####################################################################################################
def prepare_night(self, img):
tinit= timer()
self.original_image= img
gray= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gauss_gray= cv2.GaussianBlur(gray, (5, 5), 0)
max_gray= np.max(gray)
std_gray= np.std(gray)
saturated_night= np.uint8(( gray > ( max_gray - 2 * std_gray )) * 255) # argentina
self.pre= gauss_gray
self.edged= cv2.Canny(saturated_night, 10, 200, apertureSize= 5)
if self.vlogger:
self.vlogger.debug(VisualRecord("thresholding > (max - 2 * std)", [saturated_night], fmt = "jpg"))
print "e:%.3f"%(timer()-tinit)
return self.edged
####################################################################################################
def prepare_day(self, img):
self.original_image= img
gray= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gauss_gray= cv2.GaussianBlur(gray, (5, 5), 0)
self.pre= gauss_gray
self.edged= cv2.Canny(gauss_gray, 1000, 1700, apertureSize= 5)
if self.vlogger:
self.vlogger.debug(VisualRecord("day prepare", [self.pre, self.edged], fmt = "jpg"))
return self.edged
####################################################################################################
def angle_cos(self, p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
def prepare_for_ocr(self, img, scale=True):
kern= cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
# http://docs.opencv.org/master/d5/daf/tutorial_py_histogram_equalization.html#gsc.tab=0
clahe= cv2.createCLAHE(clipLimit=2.0, tileGridSize=(5,5))
ims= clahe.apply(img)
ret,th= cv2.threshold(ims, 150, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
th1= cv2.morphologyEx(th, cv2.MORPH_CLOSE, kern)
th2= self.create_rect(th1)
if self.vlogger:
self.vlogger.debug(VisualRecord("prepare_for_ocr", [img, ims, th], fmt = "jpg"))
return th2
def create_rect(self, img):
dims= img.shape
imgcop= img.copy()
imgcop[0:4,0:dims[1]]= 255
imgcop[dims[0]-2:dims[0],0:dims[1]]= 255
if self.vlogger:
self.vlogger.debug(VisualRecord("CREATE RECT", [imgcop], fmt = "jpg"))
return imgcop
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
ch = StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
p = os.path.dirname(os.path.abspath(__file__))
p = os.path.dirname(os.path.dirname(p))
fh = FileHandler(p+"/log/debug.html", mode = "w")
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
if len(sys.argv) >= 2:
path = sys.argv[1]
else:
        path = '/home/queimadas/patchcap/samples/images/ehy435.jpg'
s = timer()
f = PlateDetector.Instance()
f.set_logger(logger)
if os.path.exists(path):
img = cv2.imread(path)
else:
print "file does not exists..."
exit()
logger.debug(VisualRecord("letters", [img], fmt = "jpg"))
txt = f.first(img)
e = timer()
logger.debug('tiempo de exe %s', (e-s))
print txt
print 'tiempo de exe %s', (e-s)
#~ def prepare_laplace_highpass(self, img, scale=True):
#~ tinit= timer()
#~ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#~ self.gauss_gray = cv2.GaussianBlur(gray, (5, 5), 0)
#~ self.lap_gray = cv2.Laplacian(self.gauss_gray, cv2.CV_8U)
#~ min_val= np.min(self.gauss_gray)
#~ max_val= np.max(self.gauss_gray)
#~ dif= max_val - min_val
#~ step= dif / 10
#~ print min_val, max_val, dif, step
#~ self.edged_gauss = cv2.Canny(self.gauss_gray, min_val + 6*step, max_val - step, apertureSize= 5)
#~ self.edged_lap = cv2.Canny(self.lap_gray, np.min(self.lap_gray) , np.max(self.lap_gray), apertureSize= 5)
#~ if self.vlogger:
#~ self.vlogger.debug(VisualRecord("laplace filtering", [gray, self.gauss_gray, self.lap_gray, self.edged_gauss], fmt = "jpg"))
#~ print "e:%.3f"%(timer()-tinit)
#~ return self.edged_pre
|
MirichST/patchcap
|
src/daemon/platedetector.py
|
Python
|
gpl-2.0
| 11,192
| 0.028145
|
__all__ = ["wordlists", "roles", "bnc", "processes", "verbs",
"uktous", "tagtoclass", "queries", "mergetags"]
from corpkit.dictionaries.bnc import _get_bnc
from corpkit.dictionaries.process_types import processes
from corpkit.dictionaries.process_types import verbs
from corpkit.dictionaries.roles import roles
from corpkit.dictionaries.wordlists import wordlists
from corpkit.dictionaries.queries import queries
from corpkit.dictionaries.word_transforms import taglemma
from corpkit.dictionaries.word_transforms import mergetags
from corpkit.dictionaries.word_transforms import usa_convert
roles = roles
wordlists = wordlists
processes = processes
bnc = _get_bnc
queries = queries
tagtoclass = taglemma
uktous = usa_convert
mergetags = mergetags
verbs = verbs
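# Hedged usage sketch (not part of the original module): the aliases above are meant to
# be imported directly from the package, e.g.
#   from corpkit.dictionaries import roles, wordlists, processes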
|
interrogator/corpkit
|
corpkit/dictionaries/__init__.py
|
Python
|
mit
| 774
| 0.002584
|
from django.contrib import admin
from simulation.models import SimulationStage, SimulationStageMatch, SimulationStageMatchResult
class SimulationStageAdmin(admin.ModelAdmin):
list_display = ["number", "created_at"]
list_filter = ["created_at"]
class SimulationStageMatchAdmin(admin.ModelAdmin):
list_display = ["stage", "order", "raund",
"cat", "rat", "won", "created_at"]
list_filter = ["stage", "created_at"]
search_fields = ["cat", "rat"]
readonly_fields = ["won", "cat_password", "rat_password", "system_password"]
class SimulationStageMatchResultAdmin(admin.ModelAdmin):
list_display = ["match", "is_caught", "distance", "is_cancelled", "created_at"]
list_filter = ["created_at"]
search_fields = ["match"]
admin.site.register(SimulationStage, SimulationStageAdmin)
admin.site.register(SimulationStageMatch, SimulationStageMatchAdmin)
admin.site.register(SimulationStageMatchResult, SimulationStageMatchResultAdmin)
|
bilbeyt/ituro
|
ituro/simulation/admin.py
|
Python
|
mit
| 972
| 0.007202
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import scipy as sp
# Name of the file containing the position data
arq = 'CurvaGiro/pos.dat'
# Axis limits
v = [-10,1000, 0, 1000]
# x-axis label
xl = r'y metros'
# y-axis label
yl = r'x metros'
x = sp.genfromtxt(arq)
a = plt.plot(x[:,2], x[:,1], 'k-')
plt.grid(True, 'both', color = '0.8', linestyle = '--', linewidth = 1)
plt.axis(v)
plt.xlabel(xl)
plt.ylabel(yl)
plt.show()
|
asoliveira/NumShip
|
scripts/entrada/padrao/plot-1cg.py
|
Python
|
gpl-3.0
| 489
| 0.02686
|
#!/usr/bin/env python3
import os
import re
import itertools
from functools import reduce
from .version import __version__
sep_regex = re.compile(r'[ \-_~!@#%$^&*\(\)\[\]\{\}/\:;"|,./?`]')
def get_portable_filename(filename):
path, _ = os.path.split(__file__)
filename = os.path.join(path, filename)
return filename
def load_conversion_file(filename):
filename = get_portable_filename(filename)
with open(filename, encoding='utf-8') as f:
l = list(f)
l = [i for i in l if i.strip()]
l = [i.strip().split() for i in l]
return {i[0]: i[1:] for i in l}
print('Loading converters...')
beginning = load_conversion_file('f2p-beginning.txt')
middle = load_conversion_file('f2p-middle.txt')
ending = load_conversion_file('f2p-ending.txt')
print('Loading persian word list...')
with open(get_portable_filename('persian-word-freq.txt'), encoding='utf-8') as f:
word_freq = list(f)
word_freq = [i.strip() for i in word_freq if i.strip()]
word_freq = [i.split() for i in word_freq if not i.startswith('#')]
word_freq = {i[0]: int(i[1]) for i in word_freq}
print('Loading dictionary...')
with open(get_portable_filename('f2p-dict.txt'), encoding='utf-8') as f:
dictionary = [i.strip().split(' ', 1) for i in f if i.strip()]
dictionary = {k.strip(): v.strip() for k, v in dictionary}
def f2p_word_internal(word, original_word):
# this function receives the word as separate letters
persian = []
for i, letter in enumerate(word):
if i == 0:
converter = beginning
elif i == len(word) - 1:
converter = ending
else:
converter = middle
conversions = converter.get(letter)
if conversions == None:
return [(''.join(original_word), 0.0)]
else:
conversions = ['' if i == 'nothing' else i for i in conversions]
persian.append(conversions)
alternatives = itertools.product(*persian)
alternatives = [''.join(i) for i in alternatives]
alternatives = [(i, word_freq[i]) if i in word_freq else (i, 0)
for i in alternatives]
if len(alternatives) > 0:
max_freq = max(freq for _, freq in alternatives)
alternatives = [(w, float(freq / max_freq)) if freq != 0 else (w, 0.0)
for w, freq in alternatives]
else:
alternatives = [(''.join(word), 1.0)]
return alternatives
def variations(word):
"""Create variations of the word based on letter combinations like oo,
sh, etc."""
if word == 'a':
return [['A']]
elif len(word) == 1:
return [[word[0]]]
elif word == 'aa':
return [['A']]
elif word == 'ee':
return [['i']]
elif word == 'ei':
return [['ei']]
elif word in ['oo', 'ou']:
return [['u']]
elif word == 'kha':
return [['kha'], ['kh', 'a']]
elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return [[word]]
elif word in ["'ee", "'ei"]:
return [["'i"]]
elif word in ["'oo", "'ou"]:
return [["'u"]]
elif word in ["a'", "e'", "o'", "i'", "u'", "A'"]:
return [[word[0] + "'"]]
elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [["'" + word[1]]]
elif len(word) == 2 and word[0] == word[1]:
return [[word[0]]]
if word[:2] == 'aa':
return [['A'] + i for i in variations(word[2:])]
elif word[:2] == 'ee':
return [['i'] + i for i in variations(word[2:])]
elif word[:2] in ['oo', 'ou']:
return [['u'] + i for i in variations(word[2:])]
elif word[:3] == 'kha':
return \
[['kha'] + i for i in variations(word[3:])] + \
[['kh', 'a'] + i for i in variations(word[3:])] + \
[['k', 'h', 'a'] + i for i in variations(word[3:])]
elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return \
[[word[:2]] + i for i in variations(word[2:])] + \
[[word[0]] + i for i in variations(word[1:])]
elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif word[:3] in ["'ee", "'ei"]:
return [["'i"] + i for i in variations(word[3:])]
elif word[:3] in ["'oo", "'ou"]:
return [["'u"] + i for i in variations(word[3:])]
elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif len(word) >= 2 and word[0] == word[1]:
return [[word[0]] + i for i in variations(word[2:])]
else:
return [[word[0]] + i for i in variations(word[1:])]
def f2p_word(word, max_word_size=15, cutoff=3):
"""Convert a single word from Finglish to Persian.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
"""
original_word = word
word = word.lower()
c = dictionary.get(word)
if c:
return [(c, 1.0)]
if word == '':
return []
elif len(word) > max_word_size:
return [(original_word, 1.0)]
results = []
for w in variations(word):
results.extend(f2p_word_internal(w, original_word))
# sort results based on the confidence value
results.sort(key=lambda r: r[1], reverse=True)
# return the top three results in order to cut down on the number
# of possibilities.
return results[:cutoff]
def f2p_list(phrase, max_word_size=15, cutoff=3):
"""Convert a phrase from Finglish to Persian.
phrase: The phrase to convert.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
Returns a list of lists, each sub-list contains a number of
possibilities for each word as a pair of (word, confidence)
values.
"""
# split the phrase into words
results = [w for w in sep_regex.split(phrase) if w]
# return an empty list if no words
if results == []:
return []
# convert each word separately
results = [f2p_word(w, max_word_size, cutoff) for w in results]
return results
def f2p(phrase, max_word_size=15, cutoff=3):
"""Convert a Finglish phrase to the most probable Persian phrase.
"""
results = f2p_list(phrase, max_word_size, cutoff)
return ' '.join(i[0][0] for i in results)
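# Hedged usage sketch (not part of the original module): converting a phrase and
# inspecting the ranked per-word alternatives. The input text and the output shapes
# described in the comments are illustrative only.
def _example_conversion():
    best = f2p("salam donya")             # most probable Persian rendering of the phrase
    ranked = f2p_list("salam", cutoff=2)  # one list of (word, confidence) pairs per input word
    return best, ranked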
def main():
print('Finglish to Persian Converter, v{}'.format(__version__))
print('finglish: ', end='')
phrase = input()
result = f2p(phrase)
print(result)
if __name__ == '__main__':
main()
|
elektito/finglish
|
finglish/f2p.py
|
Python
|
mit
| 6,952
| 0.002158
|
"""The IPython kernel implementation"""
import getpass
import sys
import traceback
from IPython.core import release
from IPython.html.widgets import Widget
from IPython.utils.py3compat import builtin_mod, PY3
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from IPython.utils.traitlets import Instance, Type, Any
from IPython.utils.decorators import undoc
from ..comm import CommManager
from .kernelbase import Kernel as KernelBase
from .serialize import serialize_object, unpack_apply_message
from .zmqshell import ZMQInteractiveShell
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(ZMQInteractiveShell)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir=self.profile_dir,
user_module=self.user_module,
user_ns=self.user_ns,
kernel=self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.shell.data_pub.session = self.session
self.shell.data_pub.pub_socket = self.iopub_socket
# TMP - hack while developing
self.shell._reply_content = None
self.comm_manager = CommManager(shell=self.shell, parent=self,
kernel=self)
self.comm_manager.register_target(
'ipython.widget', Widget.handle_comm_opened)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(
self.comm_manager, msg_type)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {'name': 'ipython',
'version': sys.version_info[0]},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
        # Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
# FIXME: the shell calls the exception handler itself.
shell._reply_content = None
try:
shell.run_cell(code, store_history=store_history, silent=silent)
except:
status = u'error'
# FIXME: this code right now isn't being used yet by default,
# because the run_cell() call above directly fires off exception
# reporting. This code, therefore, is only active in the scenario
# where runlines itself has an unhandled exception. We need to
# uniformize this, for all exception construction to come from a
            # single location in the codebase.
etype, evalue, tb = sys.exc_info()
tb_list = traceback.format_exception(etype, evalue, tb)
reply_content.update(shell._showtraceback(etype, evalue, tb_list))
else:
status = u'ok'
finally:
self._restore_input()
reply_content[u'status'] = status
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
# FIXME - fish exception info out of shell, possibly left there by
# runlines. We'll need to clean up this logic later.
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(
engine_uuid=self.ident, engine_id=self.int_id, method='execute')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
if 'traceback' in reply_content:
self.log.info(
"Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and clear the payload system always.
reply_content[u'payload'] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches': matches,
'cursor_end': cursor_pos,
'cursor_start': cursor_pos - len(txt),
'metadata': {},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
info = self.shell.object_inspect(name)
reply_content = {'status': 'ok'}
reply_content['data'] = data = {}
reply_content['metadata'] = {}
reply_content['found'] = info['found']
if info['found']:
info_text = self.shell.object_inspect_text(
name,
detail_level=detail_level,
)
data['text/plain'] = info_text
return reply_content
def do_history(self, hist_access_type, output, raw, session=None, start=None,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {'history': list(hist)}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_transformer_manager.check_complete(
code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
shell = self.shell
try:
working = shell.user_ns
prefix = "_" + str(msg_id).replace("-", "") + "_"
f, args, kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname,
fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except:
# invoke IPython traceback formatting
shell.showtraceback()
# FIXME - fish exception info out of shell, possibly left there by
# run_code. We'll need to clean up this logic later.
reply_content = {}
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(
engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info(
"Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
if reply_content['ename'] == 'UnmetDependency':
reply_metadata['dependencies_met'] = False
else:
reply_content = {'status': 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
@undoc
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of IPython.kernel.zmq.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
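# Illustrative sketch (not part of the original module): _forward_input and
# _restore_input above follow a save/patch/restore pattern for builtins. A
# minimal, self-contained variant of the same idea is shown here as a context
# manager; the function name and the use of the Python 3 builtins module are
# assumptions made for illustration only.
def _example_patched_input(replacement):
    """Temporarily replace builtins.input, mirroring how the kernel
    forwards raw_input requests to the frontend."""
    import builtins
    import contextlib
    @contextlib.contextmanager
    def patcher():
        saved = builtins.input        # save the original, as _forward_input does
        builtins.input = replacement  # install the replacement prompt handler
        try:
            yield
        finally:
            builtins.input = saved    # always restore, as _restore_input does
    return patcher()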
|
mattvonrocketstein/smash
|
smashlib/ipy3x/kernel/zmq/ipkernel.py
|
Python
|
mit
| 13,151
| 0.000912
|
# -*- coding: utf-8 -*-
#
# phys_pkg.py
#
# Copyright (C) 2013 Steve Canny scanny@cisco.com
#
# This module is part of opc-diag and is released under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
"""Interface to a physical OPC package, either a zip archive or directory"""
import os
import shutil
from zipfile import ZIP_DEFLATED, ZipFile
class BlobCollection(dict):
"""
Structures a set of blobs, like a set of files in an OPC package.
It can add and retrieve items by URI (relative path, roughly) and can
also retrieve items by uri_tail, the trailing portion of the URI.
"""
class PhysPkg(object):
"""
Provides read and write services for packages on the filesystem. Suitable
for use with OPC packages in either Zip or expanded directory form.
|PhysPkg| objects are iterable, generating a (uri, blob) 2-tuple for each
item in the package.
"""
def __init__(self, blobs, root_uri):
super(PhysPkg, self).__init__()
self._blobs = blobs
self._root_uri = root_uri
def __iter__(self):
"""
Generate a (uri, blob) 2-tuple for each of the items in the package.
"""
return iter(self._blobs.items())
@staticmethod
def read(path):
"""
Return a |PhysPkg| instance loaded with contents of OPC package at
*path*, where *path* can be either a regular zip package or a
directory containing an expanded package.
"""
if os.path.isdir(path):
return DirPhysPkg.read(path)
else:
return ZipPhysPkg.read(path)
@property
def root_uri(self):
return self._root_uri # pragma: no cover
@staticmethod
def write_to_dir(blobs, dirpath):
"""
Write the contents of the |BlobCollection| instance *blobs* to a
directory at *dirpath*. If a directory already exists at *dirpath*,
it is deleted before being recreated. If a file exists at *dirpath*,
|ValueError| is raised, to prevent unintentional overwriting.
"""
PhysPkg._clear_or_make_dir(dirpath)
for uri, blob in blobs.items():
PhysPkg._write_blob_to_dir(dirpath, uri, blob)
@staticmethod
def write_to_zip(blobs, pkg_zip_path):
"""
Write "files" in |BlobCollection| instance *blobs* to a zip archive
at *pkg_zip_path*.
"""
zipf = ZipFile(pkg_zip_path, 'w', ZIP_DEFLATED)
for uri in sorted(blobs.keys()):
blob = blobs[uri]
zipf.writestr(uri, blob)
zipf.close()
@staticmethod
def _clear_or_make_dir(dirpath):
"""
Create a new, empty directory at *dirpath*, removing and recreating
any directory found there. Raises |ValueError| if *dirpath* exists
but is not a directory.
"""
# raise if *dirpath* is a file
if os.path.exists(dirpath) and not os.path.isdir(dirpath):
tmpl = "target path '%s' is not a directory"
raise ValueError(tmpl % dirpath)
# remove any existing directory tree at *dirpath*
if os.path.exists(dirpath):
shutil.rmtree(dirpath)
# create dir at dirpath, as well as any intermediate-level dirs
os.makedirs(dirpath)
@staticmethod
def _write_blob_to_dir(dirpath, uri, blob):
"""
Write *blob* to a file under *dirpath*, where the segments of *uri*
that precede the filename are created, as required, as intermediate
directories.
"""
# In general, uri will contain forward slashes as segment separators.
# This next line converts them to backslashes on Windows.
item_relpath = os.path.normpath(uri)
fullpath = os.path.join(dirpath, item_relpath)
dirpath, filename = os.path.split(fullpath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with open(fullpath, 'wb') as f:
f.write(blob)
class DirPhysPkg(PhysPkg):
"""
An OPC physical package that has been expanded into individual files in
a directory structure that mirrors the pack URI.
"""
def __init__(self, blobs, root_uri):
super(DirPhysPkg, self).__init__(blobs, root_uri)
@classmethod
def read(cls, pkg_dir):
"""
Return a |BlobCollection| instance loaded from *pkg_dir*.
"""
blobs = BlobCollection()
pfx_len = len(pkg_dir)+1
for filepath in cls._filepaths_in_dir(pkg_dir):
uri = filepath[pfx_len:].replace('\\', '/')
with open(filepath, 'rb') as f:
blob = f.read()
blobs[uri] = blob
root_uri = pkg_dir
return cls(blobs, root_uri)
@staticmethod
def _filepaths_in_dir(dirpath):
"""
Return a sorted list of relative paths, one for each of the files
under *dirpath*, recursively visiting all subdirectories.
"""
filepaths = []
for root, dirnames, filenames in os.walk(dirpath):
for filename in filenames:
filepath = os.path.join(root, filename)
filepaths.append(filepath)
return sorted(filepaths)
class ZipPhysPkg(PhysPkg):
"""
An OPC physical package in the typically encountered form, a zip archive.
"""
def __init__(self, blobs, root_uri):
super(ZipPhysPkg, self).__init__(blobs, root_uri)
@classmethod
def read(cls, pkg_zip_path):
"""
Return a |BlobCollection| instance loaded from *pkg_zip_path*.
"""
blobs = BlobCollection()
zipf = ZipFile(pkg_zip_path, 'r')
for name in zipf.namelist():
blobs[name] = zipf.read(name)
zipf.close()
root_uri = os.path.splitext(pkg_zip_path)[0]
return cls(blobs, root_uri)
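# Illustrative sketch (not part of the original module): a typical round trip
# with PhysPkg -- read a package (zip archive or expanded directory), iterate
# its (uri, blob) pairs, and write the contents back out in either physical
# form. The path arguments are placeholders.
def _example_roundtrip(src_pkg_path, out_dir, out_zip_path):
    pkg = PhysPkg.read(src_pkg_path)      # returns DirPhysPkg or ZipPhysPkg
    blobs = BlobCollection()
    for uri, blob in pkg:                 # PhysPkg iterates as (uri, blob) 2-tuples
        blobs[uri] = blob
    PhysPkg.write_to_dir(blobs, out_dir)       # expand into a directory tree
    PhysPkg.write_to_zip(blobs, out_zip_path)  # or repackage as a zip archive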
|
pombredanne/opc-diag
|
opcdiag/phys_pkg.py
|
Python
|
mit
| 5,859
| 0
|
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
from ckanext.geonetwork.harvesters.geonetwork import GeoNetworkHarvester
from ckanext.geonetwork.harvesters.utils import GeoNetworkClient
|
geosolutions-it/ckanext-geonetwork
|
ckanext/geonetwork/harvesters/__init__.py
|
Python
|
gpl-3.0
| 309
| 0
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# pylint: disable=g-bad-import-order
from isl import augment
from isl import test_util
from isl import util
flags = tf.flags
test = tf.test
lt = tf.contrib.labeled_tensor
FLAGS = flags.FLAGS
class CorruptTest(test_util.Base):
def setUp(self):
super(CorruptTest, self).setUp()
self.signal_lt = lt.select(self.input_lt, {'mask': util.slice_1(False)})
rc = lt.ReshapeCoder(['z', 'channel', 'mask'], ['channel'])
self.corrupt_coded_lt = augment.corrupt(0.1, 0.05, 0.1,
rc.encode(self.signal_lt))
self.corrupt_lt = rc.decode(self.corrupt_coded_lt)
def test_name(self):
self.assertIn('corrupt', self.corrupt_coded_lt.name)
def test(self):
self.assertEqual(self.corrupt_lt.axes, self.signal_lt.axes)
self.save_images('corrupt', [self.get_images('', self.corrupt_lt)])
self.assert_images_near('corrupt', True)
class AugmentTest(test_util.Base):
def setUp(self):
super(AugmentTest, self).setUp()
ap = augment.AugmentParameters(0.1, 0.05, 0.1)
self.input_augment_lt, self.target_augment_lt = augment.augment(
ap, self.input_lt, self.target_lt)
def test_name(self):
self.assertIn('augment/input', self.input_augment_lt.name)
self.assertIn('augment/target', self.target_augment_lt.name)
def test(self):
self.assertEqual(self.input_augment_lt.axes, self.input_lt.axes)
self.assertEqual(self.target_augment_lt.axes, self.target_lt.axes)
self.save_images('augment', [
self.get_images('input_', self.input_augment_lt),
self.get_images('target_', self.target_augment_lt)
])
self.assert_images_near('augment', True)
if __name__ == '__main__':
test.main()
|
google/in-silico-labeling
|
isl/augment_test.py
|
Python
|
apache-2.0
| 2,507
| 0.002792
|
import pytest
from webdriverwrapper.exceptions import InfoMessagesException
def test_check_info_messages(driver_info_msgs):
with pytest.raises(InfoMessagesException) as excinfo:
driver_info_msgs.check_infos(expected_info_messages=('some-info',))
def test_check_expected_info_messages(driver_info_msgs):
driver_info_msgs.check_infos(expected_info_messages=('some-info', 'another-info'))
def test_check_allowed_info_messages(driver_info_msgs):
driver_info_msgs.check_infos(allowed_info_messages=('some-info', 'another-info'))
def test_check_expected_and_allowed_info_messages(driver_info_msgs):
driver_info_msgs.check_infos(expected_info_messages=('some-info',), allowed_info_messages=('another-info',))
|
horejsek/python-webdriverwrapper
|
tests/test_info.py
|
Python
|
mit
| 735
| 0.004082
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq import IqProtocolEntity
class PingIqProtocolEntity(IqProtocolEntity):
'''
Receive
<iq type="get" xmlns="urn:xmpp:ping" from="s.whatsapp.net" id="1416174955-ping">
</iq>
Send
<iq type="get" xmlns="w:p" to="s.whatsapp.net" id="1416174955-ping">
</iq>
'''
def __init__(self, _from = None, to = None, _id = None):
super(PingIqProtocolEntity, self).__init__("urn:xmpp:ping" if _from else "w:p", _id = _id, _type = "get", _from = _from, to = to)
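# Illustrative sketch (not part of the original module): constructing the two
# directions shown in the docstring above. The id value is a placeholder.
def _examplePingEntities():
    # Outgoing ping (no _from given, so the "w:p" namespace is used):
    outgoing = PingIqProtocolEntity(to="s.whatsapp.net", _id="1416174955-ping")
    # Incoming ping as received from the server ("urn:xmpp:ping" namespace):
    incoming = PingIqProtocolEntity(_from="s.whatsapp.net", _id="1416174955-ping")
    return outgoing, incoming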
|
felix-dumit/campusbot
|
yowsup2/yowsup/layers/protocol_iq/protocolentities/iq_ping.py
|
Python
|
mit
| 555
| 0.030631
|
#!/usr/bin/env python3
import math, logging, threading, concurrent.futures
import numpy
import simplespectral
from soapypower import threadpool
logger = logging.getLogger(__name__)
class PSD:
"""Compute averaged power spectral density using Welch's method"""
def __init__(self, bins, sample_rate, fft_window='hann', fft_overlap=0.5,
crop_factor=0, log_scale=True, remove_dc=False, detrend=None,
lnb_lo=0, max_threads=0, max_queue_size=0):
self._bins = bins
self._sample_rate = sample_rate
self._fft_window = fft_window
self._fft_overlap = fft_overlap
self._fft_overlap_bins = math.floor(self._bins * self._fft_overlap)
self._crop_factor = crop_factor
self._log_scale = log_scale
self._remove_dc = remove_dc
self._detrend = detrend
self._lnb_lo = lnb_lo
self._executor = threadpool.ThreadPoolExecutor(
max_workers=max_threads,
max_queue_size=max_queue_size,
thread_name_prefix='PSD_thread'
)
self._base_freq_array = numpy.fft.fftfreq(self._bins, 1 / self._sample_rate)
def set_center_freq(self, center_freq):
"""Set center frequency and clear averaged PSD data"""
psd_state = {
'repeats': 0,
'freq_array': self._base_freq_array + self._lnb_lo + center_freq,
'pwr_array': None,
'update_lock': threading.Lock(),
'futures': [],
}
return psd_state
def result(self, psd_state):
"""Return freqs and averaged PSD for given center frequency"""
freq_array = numpy.fft.fftshift(psd_state['freq_array'])
pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])
if self._crop_factor:
crop_bins_half = round((self._crop_factor * self._bins) / 2)
freq_array = freq_array[crop_bins_half:-crop_bins_half]
pwr_array = pwr_array[crop_bins_half:-crop_bins_half]
if psd_state['repeats'] > 1:
pwr_array = pwr_array / psd_state['repeats']
if self._log_scale:
pwr_array = 10 * numpy.log10(pwr_array)
return (freq_array, pwr_array)
def wait_for_result(self, psd_state):
"""Wait for all PSD threads to finish and return result"""
if len(psd_state['futures']) > 1:
concurrent.futures.wait(psd_state['futures'])
elif psd_state['futures']:
psd_state['futures'][0].result()
return self.result(psd_state)
def result_async(self, psd_state):
"""Return freqs and averaged PSD for given center frequency (asynchronously in another thread)"""
return self._executor.submit(self.wait_for_result, psd_state)
def _release_future_memory(self, future):
"""Remove result from future to release memory"""
future._result = None
def update(self, psd_state, samples_array):
"""Compute PSD from samples and update average for given center frequency"""
freq_array, pwr_array = simplespectral.welch(samples_array, self._sample_rate, nperseg=self._bins,
window=self._fft_window, noverlap=self._fft_overlap_bins,
detrend=self._detrend)
if self._remove_dc:
pwr_array[0] = (pwr_array[1] + pwr_array[-1]) / 2
with psd_state['update_lock']:
psd_state['repeats'] += 1
if psd_state['pwr_array'] is None:
psd_state['pwr_array'] = pwr_array
else:
psd_state['pwr_array'] += pwr_array
def update_async(self, psd_state, samples_array):
"""Compute PSD from samples and update average for given center frequency (asynchronously in another thread)"""
future = self._executor.submit(self.update, psd_state, samples_array)
future.add_done_callback(self._release_future_memory)
psd_state['futures'].append(future)
return future
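# Illustrative sketch (not part of the original module): the intended call
# sequence for PSD -- one psd_state per center frequency, one or more update()
# calls with blocks of samples, then wait_for_result() for the averaged
# spectrum. The bin count, sample rate, center frequency and the random test
# signal are placeholders.
def _example_psd_usage():
    psd = PSD(bins=1024, sample_rate=2.56e6, fft_window='hann')
    psd_state = psd.set_center_freq(100e6)  # fresh averaging state for this frequency
    samples = (numpy.random.normal(size=4096)
               + 1j * numpy.random.normal(size=4096))
    psd.update(psd_state, samples)          # accumulate one Welch PSD estimate
    freq_array, pwr_array = psd.wait_for_result(psd_state)
    return freq_array, pwr_array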
|
xmikos/soapy_power
|
soapypower/psd.py
|
Python
|
mit
| 4,040
| 0.001733
|
import os
import pandas as pd
from igf_data.utils.seqrunutils import get_seqrun_date_from_igf_id
def _count_total_reads(data,seqrun_list):
'''
An internal function for counting total reads
required params:
  :param data, A dictionary containing seqrun ids as keys and read counts as values
:param seqrun_list, A list of sequencing runs
'''
try:
data['run_count'] = 0
if 'total_read' not in data:
data['total_read']=0
if len(seqrun_list) >1:
for run in seqrun_list:
if data[run] > 0:
data['run_count'] += 1
data['total_read'] += data[run]
#if data['run_count'] == 1:
# data['total_read'] = 0
return data
except:
raise
def convert_project_data_gviz_data(input_data,
sample_col='sample_igf_id',
read_count_col='attribute_value',
seqrun_col='flowcell_id'):
'''
  A utility method for converting a project's data availability information to
gviz data table format
https://developers.google.com/chart/interactive/docs/reference#DataTable
required params:
  :param input_data: A pandas data frame; it should contain the following columns:
sample_igf_id,
flowcell_id,
attribute_value (R1_READ_COUNT)
:param sample_col, Column name for sample id, default sample_igf_id
:param seqrun_col, Column name for sequencing run identifier, default flowcell_id
:param read_count_col, Column name for sample read counts, default attribute_value
  returns
    a description dictionary
    a list of data dictionaries
    a tuple giving the column order
'''
try:
if not isinstance(input_data, pd.DataFrame):
raise AttributeError('Expecting a pandas dataframe and got {0}'.\
format(type(input_data)))
    input_data[read_count_col]=input_data[read_count_col].astype(float) # convert read counts to float
processed_data=input_data.\
pivot_table(values=read_count_col,
index=[sample_col,
seqrun_col],
aggfunc='sum') # group data by sample id and seq runs
processed_data.\
reset_index([sample_col,
seqrun_col],
inplace=True) # reset index for processed data
intermediate_data=list() # define empty intermediate data structure
seqrun_set=set() # define empty seqrun set
for line in processed_data.to_dict(orient='records'): # reformat processed data to required structure
tmp_data=dict()
tmp_data.update({sample_col:line[sample_col],
line[seqrun_col]:line[read_count_col]})
seqrun_set.add(line[seqrun_col])
intermediate_data.append(tmp_data)
intermediate_data=pd.DataFrame(intermediate_data) # convert intermediate data to dataframe
intermediate_data.fillna(0,inplace=True) # replace NAN values with zero
intermediate_data=intermediate_data.\
pivot_table(index=sample_col,
aggfunc='sum').\
                      reset_index(sample_col) # group data by sample id
intermediate_data=intermediate_data.\
apply(lambda line: \
_count_total_reads(data=line,
seqrun_list=list(seqrun_set)),
axis=1) # count total reads for multiple seq runs
multiple_run_data=intermediate_data[intermediate_data['run_count'] > 1] # check for multi run projects
if len(multiple_run_data.index)==0 and \
'total_read' in multiple_run_data.columns:
intermediate_data.drop('total_read',axis=1,inplace=True) # drop the total read column if all samples are single run
if 'run_count' in intermediate_data.columns:
intermediate_data.drop('run_count',axis=1,inplace=True) # removing run_count column
intermediate_data.fillna(0,inplace=True) # fail safe for missing samples
description = {sample_col: ("string", "Sample ID")} # define description
if len(list(seqrun_set)) >1 and \
'total_read' in intermediate_data.columns:
description.update({"total_read":("number", "Total Reads")}) # add total read column for samples with multiple runs
intermediate_data['total_read']=intermediate_data['total_read'].\
astype(float) # convert column to number
for run in list(seqrun_set):
description.update({run:("number",run)}) # add seqrun columns
intermediate_data[run]=intermediate_data[run].\
astype(float) # convert column to number
column_list=[sample_col] # define column order
column_list.extend(list(seqrun_set))
if len(list(seqrun_set)) > 1 and \
'total_read' in intermediate_data.columns:
column_list.append('total_read') # total read is present only for multiple runs
intermediate_data=intermediate_data.to_dict(orient='records') # convert data frame to json
column_order=tuple(column_list)
return description,intermediate_data,column_order
except:
raise
def _modify_seqrun_data(data_series,seqrun_col,flowcell_col,path_col):
'''
  An internal method for parsing a seqrun dataframe and adding the remote dir path
required columns: seqrun_igf_id, flowcell_id
:param seqrun_col, Column name for sequencing run id, default seqrun_igf_id
  :param flowcell_col, Column name for flowcell id, default flowcell_id
:param path_col, Column name for path, default path
  returns a data series with the following columns: flowcell_id, path
'''
try:
if not isinstance(data_series,pd.Series):
raise AttributeError('Expecting a pandas data series and got {0}'.\
format(type(data_series)))
seqrun_igf_id=data_series[seqrun_col]
flowcell_id=data_series[flowcell_col]
seqrun_date=get_seqrun_date_from_igf_id(seqrun_igf_id)
data_series[path_col]=os.path.join(seqrun_date,flowcell_id) # adding path to data series
del data_series[seqrun_col]
return data_series
except:
raise
def add_seqrun_path_info(input_data,output_file,seqrun_col='seqrun_igf_id',
flowcell_col='flowcell_id',path_col='path'):
'''
  A utility method for adding a remote path to a dataframe for each sequencing run
of a project
required params:
:param input_data, A input dataframe containing the following columns
seqrun_igf_id
flowcell_id
:param seqrun_col, Column name for sequencing run id, default seqrun_igf_id
  :param flowcell_col, Column name for flowcell id, default flowcell_id
:param path_col, Column name for path, default path
  :param output_file, An output filepath for the json data
'''
try:
if not isinstance(input_data,pd.DataFrame):
raise AttributeError('Expecting a pandas dataframe and got {0}'.\
format(type(input_data)))
input_data.drop_duplicates(inplace=True) # remove duplicate entries
input_data=input_data.\
apply(lambda line: \
_modify_seqrun_data(data_series=line,
seqrun_col=seqrun_col,
flowcell_col=flowcell_col,
path_col=path_col),
axis=1) # add remote seqrun path
input_data=input_data.to_json(orient='records') # encode output json
with open(output_file,'w') as j_data:
j_data.write(input_data) # write output json file
except:
raise
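# Illustrative sketch (not part of the original module): feeding a minimal
# dataframe with the three required columns through
# convert_project_data_gviz_data. The sample and flowcell ids are placeholders.
def _example_gviz_conversion():
  input_data=pd.DataFrame([
    {'sample_igf_id':'IGF001','flowcell_id':'HXXXXXXXX','attribute_value':1000},
    {'sample_igf_id':'IGF002','flowcell_id':'HXXXXXXXX','attribute_value':2000}])
  description,data,column_order=\
    convert_project_data_gviz_data(input_data=input_data)                      # build gviz description, records and column order
  return description,data,column_order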
|
imperial-genomics-facility/data-management-python
|
igf_data/utils/project_data_display_utils.py
|
Python
|
apache-2.0
| 8,493
| 0.01672
|
"""
Classes for using robotic or other hardware using Topographica.
This module contains several classes for constructing robotics
interfaces to Topographica simulations. It includes modules that read
input from or send output to robot devices, and a (quasi) real-time
simulation object that attempts to maintain a correspondence between
simulation time and real time.
This module requires the PlayerStage robot interface system (from
playerstage.sourceforge.net), and the playerrobot module for
high-level communications with Player robots.
"""
import Image
import ImageOps
from math import pi,cos,sin
import param
from topo.base.simulation import EventProcessor
from imagen.image import GenericImage
from playerrobot import CameraDevice, PTZDevice
class CameraImage(GenericImage):
"""
An image pattern generator that gets its image from a Player
camera device.
"""
camera = param.ClassSelector(CameraDevice,default=None,doc="""
An instance of playerrobot.CameraDevice to be used
to generate images.""")
def __init__(self,**params):
super(CameraImage,self).__init__(**params)
self._image = None
def _get_image(self,params):
self._decode_image(*self.camera.image)
return True
def _decode_image(self,fmt,w,h,bpp,fdiv,data):
if fmt==1:
self._image = Image.new('L',(w,h))
self._image.fromstring(data,'raw')
else:
# JPALERT: if not grayscale, then assume color. This
# should be expanded for other modes.
rgb_im = Image.new('RGB',(w,h))
rgb_im.fromstring(data,'raw')
self._image = ImageOps.grayscale(rgb_im)
class CameraImageQueued(CameraImage):
"""
A version of CameraImage that gets the image from the camera's image queue,
rather than directly from the camera object. Using queues is
necessary when running the playerrobot in a separate process
without shared memory. When getting an image, this pattern
generator will fetch every image in the image queue and use the
most recent as the current pattern.
"""
def _get_image(self,params):
im_spec = None
if self._image is None:
# if we don't have an image then block until we get one
im_spec = self.camera.image_queue.get()
self.camera.image_queue.task_done()
# Make sure we clear the image queue and get the most recent image.
while not self.camera.image_queue.empty():
im_spec = self.camera.image_queue.get_nowait()
self.camera.image_queue.task_done()
if im_spec:
# If we got a new image from the queue, then
# construct a PIL image from it.
self._decode_image(*im_spec)
return True
else:
return False
class PTZ(EventProcessor):
"""
Pan/Tilt/Zoom control.
This event processor takes input events on its 'Saccade' input
port in the form of (amplitude,direction) saccade commands (as
produced by the topo.sheet.saccade.SaccadeController class) and
appropriately servoes the attached PTZ object. There is not
currently any dynamic zoom control, though the static zoom level
can be set as a parameter.
"""
ptz = param.ClassSelector(PTZDevice,default=None,doc="""
An instance of playerrobot.PTZDevice to be controlled.""")
zoom = param.Number(default=120,bounds=(0,None),doc="""
Desired FOV width in degrees.""")
speed = param.Number(default=200,bounds=(0,None),doc="""
Desired max pan/tilt speed in deg/sec.""")
invert_amplitude = param.Boolean(default=False,doc="""
Invert the sense of the amplitude signal, in order to get the
appropriate ipsi-/contralateral sense of saccades.""")
dest_ports = ["Saccade"]
src_ports = ["State"]
def start(self):
pass
def input_event(self,conn,data):
if conn.dest_port == "Saccade":
# the data should be (amplitude,direction)
amplitude,direction = data
self.shift(amplitude,direction)
def shift(self,amplitude,direction):
self.debug("Executing shift, amplitude=%.2f, direction=%.2f"%(amplitude,direction))
if self.invert_amplitude:
amplitude *= -1
# if the amplitude is negative, invert the direction, so up is still up.
if amplitude < 0:
direction *= -1
angle = direction * pi/180
pan,tilt,zoom = self.ptz.state_deg
pan += amplitude * cos(angle)
tilt += amplitude * sin(angle)
self.ptz.set_ws_deg(pan,tilt,self.zoom,self.speed,self.speed)
## self.ptz.cmd_queue.put_nowait(('set_ws_deg',
## (pan,tilt,self.zoom,self.speed,self.speed)))
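# Illustrative sketch (not part of the original module): the pan/tilt geometry
# used by PTZ.shift, factored out as a pure function so the saccade arithmetic
# can be checked without a Player connection. The function name is an
# assumption for illustration only.
def _example_saccade_delta(amplitude, direction):
    """Return the (pan, tilt) increments in degrees for a saccade of the
    given amplitude and direction; direction 0 yields a pure pan change,
    90 a pure tilt change."""
    angle = direction * pi/180
    return amplitude * cos(angle), amplitude * sin(angle)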
|
ioam/topographica
|
topo/hardware/robotics.py
|
Python
|
bsd-3-clause
| 4,844
| 0.010735
|
#!/usr/bin/env python
'''
A/V control for System76 laptop using Unity
'''
import os
from execute import returncode
# check for the existence of /dev/video0 which is used currently for webcam
webcam = lambda: not os.path.exists('/dev/video0')
def webcam_toggle():
if webcam():
returncode('sudo /sbin/modprobe uvcvideo')
else:
returncode('sudo /sbin/modprobe -rv uvcvideo')
# use the amixer application to glean the status of the microphone
microphone = lambda: returncode("amixer get Capture | grep Capt | grep off") == 0
microphone_toggle = lambda: returncode("amixer set Capture toggle")
def main():
print "Mic muted ? {0}, Webcam off ? {1}".format(microphone(), webcam())
if __name__ == '__main__':
main()
|
mgmtech/sys76_unity_webmic
|
unity_avindicator/webmic.py
|
Python
|
gpl-3.0
| 751
| 0.011984
|
"""
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print df.format('jS F Y H:i')
7th October 2003 11:39
>>>
"""
import re
import time
import calendar
from django.utils.dates import MONTHS, MONTHS_3, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDfFgGhHiIjlLmMnNOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))):
if i % 2:
pieces.append(force_unicode(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return u''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, t):
self.data = t
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return u'%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return u'%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return u'%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return u'%02d' % self.data.minute
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return u'%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return u'%02d' % self.data.second
def u(self):
"Microseconds"
return self.data.microsecond
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def __init__(self, dt):
# Accepts either a datetime or date object.
self.data = dt
self.timezone = getattr(dt, 'tzinfo', None)
if hasattr(self.data, 'hour') and not self.timezone:
self.timezone = LocalTimezone(dt)
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return u'%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return u'1'
else:
return u'0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return u'%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def O(self):
"Difference to Greenwich time in hours; e.g. '+0200'"
seconds = self.Z()
return u"%+03d%02d" % (seconds // 3600, (seconds // 60) % 60)
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return u'th'
last = self.data.day % 10
if last == 1:
return u'st'
if last == 2:
return u'nd'
if last == 3:
return u'rd'
return u'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def T(self):
"Time zone of this machine; e.g. 'EST' or 'MDT'"
name = self.timezone and self.timezone.tzname(self.data) or None
if name is None:
name = self.format('O')
return unicode(name)
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if getattr(self.data, 'tzinfo', None):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return unicode(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
"""
if not self.timezone:
return 0
offset = self.timezone.utcoffset(self.data)
# Only days can be negative, so negative offsets have days=-1 and
# seconds positive. Positive offsets have days=0
return offset.days * 86400 + offset.seconds
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
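# Illustrative sketch (not part of the original module): the convenience
# wrappers in use, mirroring the module docstring above. The date value is a
# placeholder.
def _example_format_usage():
    import datetime
    d = datetime.datetime(2003, 10, 7, 11, 39)
    # 'jS F Y H:i' -> '7th October 2003 11:39'; 'H:i' -> '11:39'
    return format(d, 'jS F Y H:i'), time_format(d.time(), 'H:i')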
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/utils/dateformat.py
|
Python
|
bsd-3-clause
| 8,796
| 0.002046
|
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
copy_rel_path=None,
timeout_seconds=30 * 60):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
# the entire repo will be cloned if copy_rel_path is not set.
if copy_rel_path:
environ['RELATIVE_COPY_PATH'] = copy_rel_path
docker_args = []
for k, v in list(environ.items()):
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name,
cmdline,
environ=None,
shell=False,
flake_retries=0,
timeout_retries=0,
use_workspace=False,
timeout_seconds=10 * 60):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
jobspec = jobset.JobSpec(cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self,
platform,
arch,
docker_suffix=None,
use_dotnet_cli=False,
presubmit=False):
self.name = 'csharp_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
if presubmit:
self.labels.append('presubmit')
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
        del inner_jobs # arg unused as there is little opportunity for parallelizing what's inside the distribtests
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' %
self.script_suffix,
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(self.name, [
'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix
],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
use_workspace=True)
elif self.platform == 'windows':
if self.arch == 'x64':
# Use double leading / as the first occurrence gets removed by msys bash
# when invoking the .bat file (side-effect of posix path conversion)
environ = {
'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
}
else:
environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(self.name, [
'test\\distrib\\csharp\\run_distrib_test%s.bat' %
self.script_suffix
],
environ=environ,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self,
platform,
arch,
docker_suffix,
source=False,
presubmit=False):
self.source = source
if source:
self.name = 'python_dev_%s_%s_%s' % (platform, arch, docker_suffix)
else:
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
if not self.platform == 'linux':
raise Exception("Not supported yet.")
if self.source:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_dev_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_source_distrib_test.sh',
copy_rel_path='test/distrib')
else:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_binary_distrib_test.sh',
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self,
platform,
arch,
docker_suffix,
ruby_version=None,
source=False,
presubmit=False):
self.package_type = 'binary'
if source:
self.package_type = 'source'
self.name = 'ruby_%s_%s_%s_version_%s_package_type_%s' % (
platform, arch, docker_suffix, ruby_version or
'unspecified', self.package_type)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.ruby_version = ruby_version
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
arch_to_gem_arch = {
'x64': 'x86_64',
'x86': 'x86',
}
if not self.platform == 'linux':
raise Exception("Not supported yet.")
dockerfile_name = 'tools/dockerfile/distribtest/ruby_%s_%s' % (
self.docker_suffix, self.arch)
if self.ruby_version is not None:
dockerfile_name += '_%s' % self.ruby_version
return create_docker_jobspec(
self.name,
dockerfile_name,
'test/distrib/ruby/run_distrib_test.sh %s %s %s' %
(arch_to_gem_arch[self.arch], self.platform, self.package_type),
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class PHP7DistribTest(object):
"""Tests PHP7 package"""
def __init__(self, platform, arch, docker_suffix=None, presubmit=False):
self.name = 'php7_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'php', 'php7', platform, arch]
if presubmit:
self.labels.append('presubmit')
if docker_suffix:
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/php7_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/php/run_distrib_test.sh',
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(
self.name, ['test/distrib/php/run_distrib_test_macos.sh'],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
timeout_seconds=15 * 60,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class CppDistribTest(object):
"""Tests Cpp make install by building examples."""
def __init__(self,
platform,
arch,
docker_suffix=None,
testcase=None,
presubmit=False):
if platform == 'linux':
self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
testcase)
else:
self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.testcase = testcase
self.labels = [
'distribtest',
'cpp',
platform,
arch,
testcase,
]
if presubmit:
self.labels.append('presubmit')
if docker_suffix:
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
environ = {}
if inner_jobs is not None:
# set number of parallel jobs for the C++ build
environ['GRPC_CPP_DISTRIBTEST_BUILD_COMPILER_JOBS'] = str(
inner_jobs)
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/cpp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase,
timeout_seconds=45 * 60)
elif self.platform == 'windows':
return create_jobspec(
self.name,
['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
environ={},
timeout_seconds=30 * 60,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [
# C++
CppDistribTest('linux',
'x64',
'jessie',
'cmake_as_submodule',
presubmit=True),
CppDistribTest('linux', 'x64', 'stretch', 'cmake', presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_as_externalproject',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_fetchcontent',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_module_install',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_module_install_pkgconfig',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_pkgconfig',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch_aarch64_cross',
'cmake_aarch64_cross',
presubmit=True),
CppDistribTest('windows', 'x86', testcase='cmake', presubmit=True),
CppDistribTest('windows',
'x86',
testcase='cmake_as_externalproject',
presubmit=True),
# C#
CSharpDistribTest('linux', 'x64', 'jessie', presubmit=True),
CSharpDistribTest('linux', 'x64', 'stretch'),
CSharpDistribTest('linux',
'x64',
'stretch',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux', 'x64', 'centos7'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604', use_dotnet_cli=True),
CSharpDistribTest('linux',
'x64',
'alpine',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux',
'x64',
'dotnet31',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux',
'x64',
'dotnet5',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('macos', 'x64', presubmit=True),
CSharpDistribTest('windows', 'x86', presubmit=True),
CSharpDistribTest('windows', 'x64', presubmit=True),
# Python
PythonDistribTest('linux', 'x64', 'buster', presubmit=True),
PythonDistribTest('linux', 'x86', 'buster', presubmit=True),
PythonDistribTest('linux', 'x64', 'centos7'),
PythonDistribTest('linux', 'x64', 'fedora34'),
PythonDistribTest('linux', 'x64', 'opensuse'),
PythonDistribTest('linux', 'x64', 'arch'),
PythonDistribTest('linux', 'x64', 'ubuntu1804'),
PythonDistribTest('linux', 'aarch64', 'python38_buster',
presubmit=True),
PythonDistribTest('linux',
'x64',
'alpine3.7',
source=True,
presubmit=True),
PythonDistribTest('linux', 'x64', 'buster', source=True,
presubmit=True),
PythonDistribTest('linux', 'x86', 'buster', source=True,
presubmit=True),
PythonDistribTest('linux', 'x64', 'centos7', source=True),
PythonDistribTest('linux', 'x64', 'fedora34', source=True),
PythonDistribTest('linux', 'x64', 'arch', source=True),
PythonDistribTest('linux', 'x64', 'ubuntu1804', source=True),
# Ruby
RubyDistribTest('linux', 'x64', 'stretch', ruby_version='ruby_2_5'),
RubyDistribTest('linux', 'x64', 'stretch', ruby_version='ruby_2_6'),
RubyDistribTest('linux',
'x64',
'stretch',
ruby_version='ruby_2_7',
presubmit=True),
# TODO(apolcyn): add a ruby 3.0 test once protobuf adds support
RubyDistribTest('linux',
'x64',
'stretch',
ruby_version='ruby_2_5',
source=True,
presubmit=True),
RubyDistribTest('linux', 'x64', 'centos7'),
RubyDistribTest('linux', 'x64', 'ubuntu1604'),
RubyDistribTest('linux', 'x64', 'ubuntu1804', presubmit=True),
# PHP7
PHP7DistribTest('linux', 'x64', 'stretch', presubmit=True),
PHP7DistribTest('macos', 'x64', presubmit=True),
]
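# Illustrative sketch (not part of the original module): how a caller might
# consume targets() -- filter by label and build the corresponding jobspecs.
# The 'presubmit' label used for filtering is taken from the definitions above.
def _example_presubmit_jobspecs():
    presubmit_targets = [t for t in targets() if 'presubmit' in t.labels]
    return [t.build_jobspec() for t in presubmit_targets]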
|
ctiller/grpc
|
tools/run_tests/artifacts/distribtest_targets.py
|
Python
|
apache-2.0
| 17,548
| 0.000399
|
# -*- test-case-name: calendarserver.tap.test.test_caldav -*-
##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Utilities for assembling the service and resource hierarchy.
"""
__all__ = [
"getRootResource",
"getDBPool",
"FakeRequest",
"MemoryLimitService",
]
import errno
import os
from socket import fromfd, AF_UNIX, SOCK_STREAM, socketpair
import psutil
from twext.python.filepath import CachingFilePath as FilePath
from twext.python.log import Logger
from txweb2.auth.basic import BasicCredentialFactory
from txweb2.dav import auth
from txweb2.dav.util import joinURL
from txweb2.http_headers import Headers
from txweb2.resource import Resource
from txweb2.static import File as FileResource
from twisted.application.service import Service
from twisted.cred.portal import Portal
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, succeed
from twisted.internet import reactor as _reactor
from twisted.internet.reactor import addSystemEventTrigger
from twisted.internet.tcp import Connection
from calendarserver.push.applepush import APNSubscriptionResource
from calendarserver.push.notifier import NotifierFactory
from twext.enterprise.adbapi2 import ConnectionPool, ConnectionPoolConnection
from twext.enterprise.ienterprise import ORACLE_DIALECT
from twext.enterprise.ienterprise import POSTGRES_DIALECT
from twistedcaldav.bind import doBind
from twistedcaldav.cache import CacheStoreNotifierFactory
from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
from twistedcaldav.directory.digest import QopDigestCredentialFactory
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directorybackedaddressbook import DirectoryBackedAddressBookResource
from twistedcaldav.resource import AuthenticationWrapper
from twistedcaldav.simpleresource import SimpleResource, SimpleRedirectResource
from twistedcaldav.timezones import TimezoneCache
from twistedcaldav.timezoneservice import TimezoneServiceResource
from twistedcaldav.timezonestdservice import TimezoneStdServiceResource
from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMUtils, DomainKeyResource
from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
try:
from twistedcaldav.authkerb import NegotiateCredentialFactory
NegotiateCredentialFactory # pacify pyflakes
except ImportError:
NegotiateCredentialFactory = None
from twext.enterprise.adbapi2 import ConnectionPoolClient
from txdav.base.datastore.dbapiclient import DBAPIConnector, OracleConnector
from txdav.base.datastore.dbapiclient import postgresPreflight
from txdav.base.datastore.subpostgres import PostgresService
from calendarserver.accesslog import DirectoryLogWrapperResource
from calendarserver.provision.root import RootResource
from calendarserver.tools.util import checkDirectory
from calendarserver.webadmin.landing import WebAdminLandingResource
from calendarserver.webcal.resource import WebCalendarResource
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
from txdav.common.datastore.sql import current_sql_schema
from txdav.common.datastore.upgrade.sql.upgrade import NotAllowedToUpgrade
from twext.python.filepath import CachingFilePath
from urllib import quote
from twisted.python.usage import UsageError
from twext.who.checker import UsernamePasswordCredentialChecker
from twext.who.checker import HTTPDigestCredentialChecker
from twisted.cred.error import UnauthorizedLogin
from txweb2.dav.auth import IPrincipalCredentials
log = Logger()
def pgServiceFromConfig(config, subServiceFactory, uid=None, gid=None):
"""
Construct a L{PostgresService} from a given configuration and subservice.
@param config: the configuration to derive postgres configuration
parameters from.
@param subServiceFactory: A factory for the service to start once the
L{PostgresService} has been initialized.
@param uid: The user-ID to run the PostgreSQL server as.
@param gid: The group-ID to run the PostgreSQL server as.
@return: a service which can start postgres.
@rtype: L{PostgresService}
"""
dbRoot = CachingFilePath(config.DatabaseRoot)
# Construct a PostgresService exactly as the parent would, so that we
# can establish connection information.
return PostgresService(
dbRoot, subServiceFactory, current_sql_schema,
databaseName=config.Postgres.DatabaseName,
clusterName=config.Postgres.ClusterName,
logFile=config.Postgres.LogFile,
logDirectory=config.LogRoot if config.Postgres.LogRotation else "",
socketDir=config.Postgres.SocketDirectory,
listenAddresses=config.Postgres.ListenAddresses,
sharedBuffers=config.Postgres.SharedBuffers,
maxConnections=config.Postgres.MaxConnections,
options=config.Postgres.Options,
uid=uid, gid=gid,
spawnedDBUser=config.SpawnedDBUser,
importFileName=config.DBImportFile,
pgCtl=config.Postgres.Ctl,
initDB=config.Postgres.Init,
)
def pgConnectorFromConfig(config):
"""
Create a postgres DB-API connector from the given configuration.
"""
import pgdb
return DBAPIConnector(pgdb, postgresPreflight, config.DSN).connect
def oracleConnectorFromConfig(config):
"""
    Create an Oracle DB-API connector from the given configuration.
"""
return OracleConnector(config.DSN).connect
class ConnectionWithPeer(Connection):
connected = True
def getPeer(self):
return "<peer: %r %r>" % (self.socket.fileno(), id(self))
def getHost(self):
return "<host: %r %r>" % (self.socket.fileno(), id(self))
def transactionFactoryFromFD(dbampfd, dialect, paramstyle):
"""
Create a transaction factory from an inherited file descriptor, such as one
created by L{ConnectionDispenser}.
"""
skt = fromfd(dbampfd, AF_UNIX, SOCK_STREAM)
os.close(dbampfd)
protocol = ConnectionPoolClient(dialect=dialect, paramstyle=paramstyle)
transport = ConnectionWithPeer(skt, protocol)
protocol.makeConnection(transport)
transport.startReading()
return protocol.newTransaction
class ConnectionDispenser(object):
"""
A L{ConnectionDispenser} can dispense already-connected file descriptors,
for use with subprocess spawning.
"""
# Very long term FIXME: this mechanism should ideally be eliminated, by
# making all subprocesses have a single stdio AMP connection that
# multiplexes between multiple protocols.
def __init__(self, connectionPool):
self.pool = connectionPool
def dispense(self):
"""
Dispense a socket object, already connected to a server, for a client
in a subprocess.
"""
# FIXME: these sockets need to be re-dispensed when the process is
# respawned, and they currently won't be.
c, s = socketpair(AF_UNIX, SOCK_STREAM)
protocol = ConnectionPoolConnection(self.pool)
transport = ConnectionWithPeer(s, protocol)
protocol.makeConnection(transport)
transport.startReading()
return c
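# Illustrative sketch (not part of the original module): the two halves of the
# descriptor hand-off around ConnectionDispenser and transactionFactoryFromFD.
# The parent dispenses a connected socket and passes its file descriptor to a
# spawned child; the child rebuilds a transaction factory from the inherited
# descriptor. The dialect and paramstyle values are placeholders.
def _exampleDispenseForChild(dispenser):
    clientSocket = dispenser.dispense()    # connected AF_UNIX socket for the child
    return clientSocket.fileno()           # descriptor to be inherited across spawn
def _exampleChildTransactionFactory(inheritedFD):
    return transactionFactoryFromFD(inheritedFD, POSTGRES_DIALECT, "pyformat")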
def storeFromConfig(config, txnFactory, directoryService):
"""
Produce an L{IDataStore} from the given configuration, transaction factory,
    and directory service.
If the transaction factory is C{None}, we will create a filesystem
store. Otherwise, a SQL store, using that connection information.
"""
#
# Configure NotifierFactory
#
notifierFactories = {}
if config.Notifications.Enabled:
notifierFactories["push"] = NotifierFactory(config.ServerHostName, config.Notifications.CoalesceSeconds)
if config.EnableResponseCache and config.Memcached.Pools.Default.ClientEnabled:
notifierFactories["cache"] = CacheStoreNotifierFactory()
quota = config.UserQuota
if quota == 0:
quota = None
if txnFactory is not None:
if config.EnableSSL:
uri = "https://{config.ServerHostName}:{config.SSLPort}".format(config=config)
else:
uri = "https://{config.ServerHostName}:{config.HTTPPort}".format(config=config)
attachments_uri = uri + "/calendars/__uids__/%(home)s/dropbox/%(dropbox_id)s/%(name)s"
store = CommonSQLDataStore(
txnFactory, notifierFactories,
directoryService,
FilePath(config.AttachmentsRoot), attachments_uri,
config.EnableCalDAV, config.EnableCardDAV,
config.EnableManagedAttachments,
quota=quota,
logLabels=config.LogDatabase.LabelsInSQL,
logStats=config.LogDatabase.Statistics,
logStatsLogFile=config.LogDatabase.StatisticsLogFile,
logSQL=config.LogDatabase.SQLStatements,
logTransactionWaits=config.LogDatabase.TransactionWaitSeconds,
timeoutTransactions=config.TransactionTimeoutSeconds,
cacheQueries=config.QueryCaching.Enabled,
cachePool=config.QueryCaching.MemcachedPool,
cacheExpireSeconds=config.QueryCaching.ExpireSeconds
)
else:
store = CommonFileDataStore(
FilePath(config.DocumentRoot),
notifierFactories, directoryService,
config.EnableCalDAV, config.EnableCardDAV,
quota=quota
)
# FIXME: NotifierFactories need a reference to the store in order
# to get a txn in order to possibly create a Work item
for notifierFactory in notifierFactories.values():
notifierFactory.store = store
return store
# MOVE2WHO -- should we move this class somewhere else?
class PrincipalCredentialChecker(object):
credentialInterfaces = (IPrincipalCredentials,)
@inlineCallbacks
def requestAvatarId(self, credentials):
credentials = IPrincipalCredentials(credentials)
if credentials.authnPrincipal is None:
raise UnauthorizedLogin(
"No such user: {user}".format(
user=credentials.credentials.username
)
)
# See if record is enabledForLogin
if not credentials.authnPrincipal.record.isLoginEnabled():
raise UnauthorizedLogin(
"User not allowed to log in: {user}".format(
user=credentials.credentials.username
)
)
# Handle Kerberos as a separate behavior
try:
from twistedcaldav.authkerb import NegotiateCredentials
except ImportError:
NegotiateCredentials = None
if NegotiateCredentials and isinstance(credentials.credentials,
NegotiateCredentials):
# If we get here with Kerberos, then authentication has already succeeded
returnValue(
(
credentials.authnPrincipal,
credentials.authzPrincipal,
)
)
else:
if (yield credentials.authnPrincipal.record.verifyCredentials(credentials.credentials)):
returnValue(
(
credentials.authnPrincipal,
credentials.authzPrincipal,
)
)
else:
raise UnauthorizedLogin(
"Incorrect credentials for user: {user}".format(
user=credentials.credentials.username
)
)
def getRootResource(config, newStore, resources=None):
"""
Set up directory service and resource hierarchy based on config.
Return root resource.
Additional resources can be added to the hierarchy by passing a list of
tuples containing: path, resource class, __init__ args list, and optional
authentication schemes list ("basic", "digest").
If the store is specified, then it has already been constructed, so use it.
Otherwise build one with L{storeFromConfig}.
"""
if newStore is None:
raise RuntimeError("Internal error, 'newStore' must be specified.")
if resources is None:
resources = []
    # FIXME: this is only here to work around circular imports
doBind()
#
# Default resource classes
#
rootResourceClass = RootResource
calendarResourceClass = DirectoryCalendarHomeProvisioningResource
iScheduleResourceClass = IScheduleInboxResource
conduitResourceClass = ConduitResource
timezoneServiceResourceClass = TimezoneServiceResource
timezoneStdServiceResourceClass = TimezoneStdServiceResource
webCalendarResourceClass = WebCalendarResource
webAdminResourceClass = WebAdminLandingResource
addressBookResourceClass = DirectoryAddressBookHomeProvisioningResource
directoryBackedAddressBookResourceClass = DirectoryBackedAddressBookResource
apnSubscriptionResourceClass = APNSubscriptionResource
principalResourceClass = DirectoryPrincipalProvisioningResource
directory = newStore.directoryService()
principalCollection = principalResourceClass("/principals/", directory)
#
# Configure the Site and Wrappers
#
wireEncryptedCredentialFactories = []
wireUnencryptedCredentialFactories = []
portal = Portal(auth.DavRealm())
portal.registerChecker(UsernamePasswordCredentialChecker(directory))
portal.registerChecker(HTTPDigestCredentialChecker(directory))
portal.registerChecker(PrincipalCredentialChecker())
realm = directory.realmName.encode("utf-8") or ""
log.info("Configuring authentication for realm: {realm}", realm=realm)
for scheme, schemeConfig in config.Authentication.iteritems():
scheme = scheme.lower()
credFactory = None
if schemeConfig["Enabled"]:
log.info("Setting up scheme: {scheme}", scheme=scheme)
if scheme == "kerberos":
if not NegotiateCredentialFactory:
log.info("Kerberos support not available")
continue
try:
principal = schemeConfig["ServicePrincipal"]
if not principal:
credFactory = NegotiateCredentialFactory(
type="HTTP",
hostname=config.ServerHostName,
)
else:
credFactory = NegotiateCredentialFactory(
principal=principal,
)
except ValueError:
log.info("Could not start Kerberos")
continue
elif scheme == "digest":
credFactory = QopDigestCredentialFactory(
schemeConfig["Algorithm"],
schemeConfig["Qop"],
realm,
)
elif scheme == "basic":
credFactory = BasicCredentialFactory(realm)
elif scheme == "wiki":
pass
else:
log.error("Unknown scheme: {scheme}", scheme=scheme)
if credFactory:
wireEncryptedCredentialFactories.append(credFactory)
if schemeConfig.get("AllowedOverWireUnencrypted", False):
wireUnencryptedCredentialFactories.append(credFactory)
#
# Setup Resource hierarchy
#
log.info("Setting up document root at: {root}", root=config.DocumentRoot)
# principalCollection = directory.principalCollection
if config.EnableCalDAV:
log.info("Setting up calendar collection: {cls}", cls=calendarResourceClass)
calendarCollection = calendarResourceClass(
directory,
"/calendars/",
newStore,
)
if config.EnableCardDAV:
log.info("Setting up address book collection: {cls}", cls=addressBookResourceClass)
addressBookCollection = addressBookResourceClass(
directory,
"/addressbooks/",
newStore,
)
if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook:
log.info("Setting up directory address book: {cls}",
cls=directoryBackedAddressBookResourceClass)
directoryBackedAddressBookCollection = directoryBackedAddressBookResourceClass(
principalCollections=(principalCollection,),
principalDirectory=directory,
uri=joinURL("/", config.DirectoryAddressBook.name, "/")
)
if _reactor._started:
directoryBackedAddressBookCollection.provisionDirectory()
else:
addSystemEventTrigger("after", "startup", directoryBackedAddressBookCollection.provisionDirectory)
else:
# remove /directory from previous runs that may have created it
directoryPath = os.path.join(config.DocumentRoot, config.DirectoryAddressBook.name)
try:
FilePath(directoryPath).remove()
log.info("Deleted: {path}", path=directoryPath)
except (OSError, IOError), e:
if e.errno != errno.ENOENT:
log.error("Could not delete: {path} : {error}", path=directoryPath, error=e)
log.info("Setting up root resource: {cls}", cls=rootResourceClass)
root = rootResourceClass(
config.DocumentRoot,
principalCollections=(principalCollection,),
)
root.putChild("principals", principalCollection)
if config.EnableCalDAV:
root.putChild("calendars", calendarCollection)
if config.EnableCardDAV:
root.putChild('addressbooks', addressBookCollection)
if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook:
root.putChild(config.DirectoryAddressBook.name, directoryBackedAddressBookCollection)
# /.well-known
if config.EnableWellKnown:
log.info("Setting up .well-known collection resource")
wellKnownResource = SimpleResource(
principalCollections=(principalCollection,),
isdir=True,
defaultACL=SimpleResource.allReadACL
)
root.putChild(".well-known", wellKnownResource)
for enabled, wellknown_name, redirected_to in (
(config.EnableCalDAV, "caldav", "/",),
(config.EnableCardDAV, "carddav", "/",),
(config.TimezoneService.Enabled, "timezone", "/stdtimezones",),
(config.Scheduling.iSchedule.Enabled, "ischedule", "/ischedule"),
):
if enabled:
if config.EnableSSL:
scheme = "https"
port = config.SSLPort
else:
scheme = "http"
port = config.HTTPPort
wellKnownResource.putChild(
wellknown_name,
SimpleRedirectResource(
principalCollections=(principalCollection,),
isdir=False,
defaultACL=SimpleResource.allReadACL,
scheme=scheme, port=port, path=redirected_to)
)
for alias in config.Aliases:
url = alias.get("url", None)
path = alias.get("path", None)
if not url or not path or url[0] != "/":
log.error("Invalid alias: URL: {url} Path: {path}", url=url, path=path)
continue
urlbits = url[1:].split("/")
parent = root
for urlpiece in urlbits[:-1]:
child = parent.getChild(urlpiece)
if child is None:
child = Resource()
parent.putChild(urlpiece, child)
parent = child
if parent.getChild(urlbits[-1]) is not None:
log.error("Invalid alias: URL: {url} Path: {path} already exists", url=url, path=path)
continue
resource = FileResource(path)
parent.putChild(urlbits[-1], resource)
log.info("Added alias {url} -> {path}", url=url, path=path)
# Need timezone cache before setting up any timezone service
log.info("Setting up Timezone Cache")
TimezoneCache.create()
# Timezone service is optional
if config.EnableTimezoneService:
log.info("Setting up time zone service resource: {cls}",
cls=timezoneServiceResourceClass)
timezoneService = timezoneServiceResourceClass(
root,
)
root.putChild("timezones", timezoneService)
# Standard Timezone service is optional
if config.TimezoneService.Enabled:
log.info("Setting up standard time zone service resource: {cls}",
cls=timezoneStdServiceResourceClass)
timezoneStdService = timezoneStdServiceResourceClass(
root,
)
root.putChild("stdtimezones", timezoneStdService)
# TODO: we only want the master to do this
if _reactor._started:
_reactor.callLater(0, timezoneStdService.onStartup)
else:
addSystemEventTrigger("after", "startup", timezoneStdService.onStartup)
#
# iSchedule/cross-pod service for podding
#
if config.Servers.Enabled:
log.info("Setting up iSchedule podding inbox resource: {cls}", cls=iScheduleResourceClass)
ischedule = iScheduleResourceClass(
root,
newStore,
podding=True
)
root.putChild(config.Servers.InboxName, ischedule)
log.info("Setting up podding conduit resource: {cls}", cls=conduitResourceClass)
conduit = conduitResourceClass(
root,
newStore,
)
root.putChild(config.Servers.ConduitName, conduit)
#
# iSchedule service (not used for podding)
#
if config.Scheduling.iSchedule.Enabled:
log.info("Setting up iSchedule inbox resource: {cls}", cls=iScheduleResourceClass)
ischedule = iScheduleResourceClass(
root,
newStore,
)
root.putChild("ischedule", ischedule)
# Do DomainKey resources
DKIMUtils.validConfiguration(config)
if config.Scheduling.iSchedule.DKIM.Enabled:
log.info("Setting up domainkey resource: {res}", res=DomainKeyResource)
domain = config.Scheduling.iSchedule.DKIM.Domain if config.Scheduling.iSchedule.DKIM.Domain else config.ServerHostName
dk = DomainKeyResource(
domain,
config.Scheduling.iSchedule.DKIM.KeySelector,
config.Scheduling.iSchedule.DKIM.PublicKeyFile,
)
wellKnownResource.putChild("domainkey", dk)
#
# WebCal
#
if config.WebCalendarRoot:
log.info("Setting up WebCalendar resource: {res}",
res=config.WebCalendarRoot)
webCalendar = webCalendarResourceClass(
config.WebCalendarRoot,
principalCollections=(principalCollection,),
)
root.putChild("webcal", webCalendar)
#
# WebAdmin
#
if config.EnableWebAdmin:
log.info("Setting up WebAdmin resource")
webAdmin = webAdminResourceClass(
config.WebCalendarRoot,
root,
directory,
newStore,
principalCollections=(principalCollection,),
)
root.putChild("admin", webAdmin)
#
# Apple Push Notification Subscriptions
#
apnConfig = config.Notifications.Services.APNS
if apnConfig.Enabled:
log.info("Setting up APNS resource at /{url}",
url=apnConfig["SubscriptionURL"])
apnResource = apnSubscriptionResourceClass(root, newStore)
root.putChild(apnConfig["SubscriptionURL"], apnResource)
#
# Configure ancillary data
#
# MOVE2WHO
log.info("Configuring authentication wrapper")
overrides = {}
if resources:
for path, cls, args, schemes in resources:
# putChild doesn't want "/" starting the path
root.putChild(path, cls(root, newStore, *args))
# overrides requires "/" prepended
path = "/" + path
overrides[path] = []
for scheme in schemes:
if scheme == "basic":
overrides[path].append(BasicCredentialFactory(realm))
elif scheme == "digest":
schemeConfig = config.Authentication.Digest
overrides[path].append(QopDigestCredentialFactory(
schemeConfig["Algorithm"],
schemeConfig["Qop"],
realm,
))
log.info("Overriding {path} with {cls} ({schemes})",
path=path, cls=cls, schemes=schemes)
authWrapper = AuthenticationWrapper(
root,
portal,
wireEncryptedCredentialFactories,
wireUnencryptedCredentialFactories,
(auth.IPrincipal,),
overrides=overrides
)
logWrapper = DirectoryLogWrapperResource(
authWrapper,
directory,
)
# FIXME: Storing a reference to the root resource on the store
# until scheduling no longer needs resource objects
newStore.rootResource = root
return logWrapper
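# --- Illustrative sketch (not part of the original module) ---
# getRootResource() accepts extra (path, resource class, args, auth schemes)
# tuples via its "resources" argument; MyControlResource below is a
# hypothetical class taking (root, store, *args), shown only to make the
# tuple shape concrete.
def _exampleExtraResources(config, store, MyControlResource):
    extra = [
        # path under "/", resource class, extra __init__ args, auth schemes
        ("control", MyControlResource, (), ["basic", "digest"]),
    ]
    return getRootResource(config, store, resources=extra)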
def getDBPool(config):
"""
Inspect configuration to determine what database connection pool
to set up.
return: (L{ConnectionPool}, transactionFactory)
"""
if config.DBType == 'oracle':
dialect = ORACLE_DIALECT
paramstyle = 'numeric'
else:
dialect = POSTGRES_DIALECT
paramstyle = 'pyformat'
pool = None
if config.DBAMPFD:
txnFactory = transactionFactoryFromFD(
int(config.DBAMPFD), dialect, paramstyle
)
elif not config.UseDatabase:
txnFactory = None
elif not config.SharedConnectionPool:
if config.DBType == '':
# get a PostgresService to tell us what the local connection
# info is, but *don't* start it (that would start one postgres
# master per slave, resulting in all kinds of mayhem...)
connectionFactory = pgServiceFromConfig(
config, None).produceConnection
elif config.DBType == 'postgres':
connectionFactory = pgConnectorFromConfig(config)
elif config.DBType == 'oracle':
connectionFactory = oracleConnectorFromConfig(config)
else:
raise UsageError("unknown DB type: %r" % (config.DBType,))
pool = ConnectionPool(connectionFactory, dialect=dialect,
paramstyle=paramstyle,
maxConnections=config.MaxDBConnectionsPerPool)
txnFactory = pool.connection
else:
raise UsageError(
"trying to use DB in slave, but no connection info from parent"
)
return (pool, txnFactory)
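# --- Illustrative sketch (not part of the original module) ---
# Typical wiring: derive the transaction factory from the configuration with
# getDBPool() and hand it to storeFromConfig(); the directoryService argument
# is assumed to be supplied by the caller.
def _exampleStoreSetup(config, directoryService):
    pool, txnFactory = getDBPool(config)
    store = storeFromConfig(config, txnFactory, directoryService)
    return pool, store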
class FakeRequest(object):
def __init__(self, rootResource, method, path, uri='/', transaction=None):
self.rootResource = rootResource
self.method = method
self.path = path
self.uri = uri
self._resourcesByURL = {}
self._urlsByResource = {}
self.headers = Headers()
if transaction is not None:
self._newStoreTransaction = transaction
@inlineCallbacks
def _getChild(self, resource, segments):
if not segments:
returnValue(resource)
child, remaining = (yield resource.locateChild(self, segments))
returnValue((yield self._getChild(child, remaining)))
@inlineCallbacks
def locateResource(self, url):
url = url.strip("/")
segments = url.split("/")
resource = (yield self._getChild(self.rootResource, segments))
if resource:
self._rememberResource(resource, url)
returnValue(resource)
@inlineCallbacks
def locateChildResource(self, parent, childName):
if parent is None or childName is None:
returnValue(None)
parentURL = self.urlForResource(parent)
if not parentURL.endswith("/"):
parentURL += "/"
url = parentURL + quote(childName)
segment = childName
resource = (yield self._getChild(parent, [segment]))
if resource:
self._rememberResource(resource, url)
returnValue(resource)
def _rememberResource(self, resource, url):
self._resourcesByURL[url] = resource
self._urlsByResource[resource] = url
return resource
def _forgetResource(self, resource, url):
if url in self._resourcesByURL:
del self._resourcesByURL[url]
if resource in self._urlsByResource:
del self._urlsByResource[resource]
def urlForResource(self, resource):
url = self._urlsByResource.get(resource, None)
if url is None:
class NoURLForResourceError(RuntimeError):
pass
raise NoURLForResourceError(resource)
return url
def addResponseFilter(self, *args, **kwds):
pass
def memoryForPID(pid, residentOnly=True):
"""
Return the amount of memory in use for the given process. If residentOnly is True,
then RSS is returned; if False, then virtual memory is returned.
@param pid: process id
@type pid: C{int}
@param residentOnly: Whether only resident memory should be included
@type residentOnly: C{boolean}
@return: Memory used by process in bytes
@rtype: C{int}
"""
memoryInfo = psutil.Process(pid).get_memory_info()
return memoryInfo.rss if residentOnly else memoryInfo.vms
class MemoryLimitService(Service, object):
"""
A service which when paired with a DelayedStartupProcessMonitor will periodically
examine the memory usage of the monitored processes and stop any which exceed
a configured limit. Memcached processes are ignored.
"""
def __init__(self, processMonitor, intervalSeconds, limitBytes, residentOnly, reactor=None):
"""
@param processMonitor: the DelayedStartupProcessMonitor
@param intervalSeconds: how often to check
@type intervalSeconds: C{int}
@param limitBytes: any monitored process over this limit is stopped
@type limitBytes: C{int}
@param residentOnly: whether only resident memory should be included
@type residentOnly: C{boolean}
@param reactor: for testing
"""
self._processMonitor = processMonitor
self._seconds = intervalSeconds
self._bytes = limitBytes
self._residentOnly = residentOnly
self._delayedCall = None
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
# Unit tests can swap out _memoryForPID
self._memoryForPID = memoryForPID
def startService(self):
"""
Start scheduling the memory checks
"""
super(MemoryLimitService, self).startService()
self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)
def stopService(self):
"""
Stop checking memory
"""
super(MemoryLimitService, self).stopService()
if self._delayedCall is not None and self._delayedCall.active():
self._delayedCall.cancel()
self._delayedCall = None
def checkMemory(self):
"""
Stop any processes monitored by our paired processMonitor whose resident
memory exceeds our configured limitBytes. Reschedule intervalSeconds in
the future.
"""
try:
for name in self._processMonitor.processes:
if name.startswith("memcached"):
continue
proto = self._processMonitor.protocols.get(name, None)
if proto is not None:
proc = proto.transport
pid = proc.pid
try:
memory = self._memoryForPID(pid, self._residentOnly)
except Exception, e:
log.error("Unable to determine memory usage of PID: {pid} ({err})",
pid=pid, err=e)
continue
if memory > self._bytes:
log.warn("Killing large process: {name} PID:{pid} {memtype}:{mem}",
name=name, pid=pid,
memtype=("Resident" if self._residentOnly else "Virtual"),
mem=memory)
self._processMonitor.stopProcess(name)
finally:
self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)
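# --- Illustrative sketch (not part of the original module) ---
# How a MemoryLimitService might be attached to an existing
# DelayedStartupProcessMonitor; the 15 second interval and 2 GB limit below
# are assumptions chosen only for illustration.
def _exampleMemoryLimitService(processMonitor):
    return MemoryLimitService(
        processMonitor,
        intervalSeconds=15,                  # how often to poll child memory
        limitBytes=2 * 1024 * 1024 * 1024,   # stop children above ~2 GB
        residentOnly=True,                   # compare against RSS, not VSZ
    )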
def checkDirectories(config):
"""
Make sure that various key directories exist (and create if needed)
"""
#
# Verify that server root actually exists
#
checkDirectory(
config.ServerRoot,
"Server root",
# Require write access because one might not allow editing on /
access=os.W_OK,
wait=True # Wait in a loop until ServerRoot exists
)
#
# Verify that other root paths are OK
#
if config.DataRoot.startswith(config.ServerRoot + os.sep):
checkDirectory(
config.DataRoot,
"Data root",
access=os.W_OK,
create=(0750, config.UserName, config.GroupName),
)
if config.DocumentRoot.startswith(config.DataRoot + os.sep):
checkDirectory(
config.DocumentRoot,
"Document root",
# Don't require write access because one might not allow editing on /
access=os.R_OK,
create=(0750, config.UserName, config.GroupName),
)
if config.ConfigRoot.startswith(config.ServerRoot + os.sep):
checkDirectory(
config.ConfigRoot,
"Config root",
access=os.W_OK,
create=(0750, config.UserName, config.GroupName),
)
# Always create these:
checkDirectory(
config.LogRoot,
"Log root",
access=os.W_OK,
create=(0750, config.UserName, config.GroupName),
)
checkDirectory(
config.RunRoot,
"Run root",
access=os.W_OK,
create=(0770, config.UserName, config.GroupName),
)
class Stepper(object):
"""
Manages the sequential, deferred execution of "steps" which are objects
implementing these methods:
- stepWithResult(result)
@param result: the result returned from the previous step
@returns: Deferred
- stepWithFailure(failure)
@param failure: a Failure encapsulating the exception from the
previous step
@returns: Failure to continue down the errback chain, or a
Deferred returning a non-Failure to switch back to the
callback chain
"Step" objects are added in order by calling addStep(), and when start()
is called, the Stepper will call the stepWithResult() of the first step.
If stepWithResult() doesn't raise an Exception, the Stepper will call the
next step's stepWithResult(). If a stepWithResult() raises an Exception,
the Stepper will call the next step's stepWithFailure() -- if it's
implemented -- passing it a Failure object. If the stepWithFailure()
decides it can handle the Failure and proceed, it can return a non-Failure
which is an indicator to the Stepper to call the next step's
stepWithResult().
TODO: Create an IStep interface (?)
"""
def __init__(self):
self.steps = []
self.failure = None
self.result = None
self.running = False
def addStep(self, step):
"""
Adds a step object to the ordered list of steps
@param step: the object to add
@type step: an object implementing stepWithResult()
@return: the Stepper object itself so addStep() calls can be chained
"""
if self.running:
raise RuntimeError("Can't add step after start")
self.steps.append(step)
return self
def defaultStepWithResult(self, result):
return succeed(result)
def defaultStepWithFailure(self, failure):
if failure.type != NotAllowedToUpgrade:
log.failure("Step failure", failure=failure)
return failure
# def protectStep(self, callback):
# def _protected(result):
# try:
# return callback(result)
# except Exception, e:
# # TODO: how to turn Exception into Failure
# return Failure()
# return _protected
def start(self, result=None):
"""
Begin executing the added steps in sequence. If a step object
does not implement a stepWithResult/stepWithFailure method, a
default implementation will be used.
@param result: an optional value to pass to the first step
@return: the Deferred that will fire when steps are done
"""
self.running = True
self.deferred = Deferred()
for step in self.steps:
# See if we need to use a default implementation of the step methods:
if hasattr(step, "stepWithResult"):
callBack = step.stepWithResult
# callBack = self.protectStep(step.stepWithResult)
else:
callBack = self.defaultStepWithResult
if hasattr(step, "stepWithFailure"):
errBack = step.stepWithFailure
else:
errBack = self.defaultStepWithFailure
# Add callbacks to the Deferred
self.deferred.addCallbacks(callBack, errBack)
# Get things going
self.deferred.callback(result)
return self.deferred
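# --- Illustrative usage sketch (not part of the original module) ---
# Drives the Stepper with a hypothetical step class implementing
# stepWithResult(); RecordingStep is an assumption made for illustration only.
def _exampleStepperUsage():
    from twisted.internet.defer import succeed
    class RecordingStep(object):
        """Hypothetical step that appends a marker to the incoming result."""
        def __init__(self, marker):
            self.marker = marker
        def stepWithResult(self, result):
            # Returning a Deferred lets the Stepper chain into the next step.
            return succeed(result + [self.marker])
    stepper = Stepper()
    stepper.addStep(RecordingStep("a")).addStep(RecordingStep("b"))
    return stepper.start([])  # fires with ["a", "b"] once both steps have run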
|
trevor/calendarserver
|
calendarserver/tap/util.py
|
Python
|
apache-2.0
| 38,773
| 0.00227
|
from __future__ import print_function
from .patchpipette import PatchPipette
|
pbmanis/acq4
|
acq4/devices/PatchPipette/__init__.py
|
Python
|
mit
| 77
| 0
|
"""Models for the util app. """
import cStringIO
import gzip
import logging
from config_models.models import ConfigurationModel
from django.db import models
from django.utils.text import compress_string
from opaque_keys.edx.django.models import CreatorMixin
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RateLimitConfiguration(ConfigurationModel):
"""Configuration flag to enable/disable rate limiting.
Applies to Django Rest Framework views.
This is useful for disabling rate limiting for performance tests.
When enabled, it will disable rate limiting on any view decorated
with the `can_disable_rate_limit` class decorator.
"""
class Meta(ConfigurationModel.Meta):
app_label = "util"
def decompress_string(value):
"""
Helper function to reverse CompressedTextField.get_prep_value.
"""
try:
val = value.encode('utf').decode('base64')
zbuf = cStringIO.StringIO(val)
zfile = gzip.GzipFile(fileobj=zbuf)
ret = zfile.read()
zfile.close()
except Exception as e:
logger.error('String decompression failed. There may be corrupted data in the database: %s', e)
ret = value
return ret
class CompressedTextField(CreatorMixin, models.TextField):
""" TextField that transparently compresses data when saving to the database, and decompresses the data
when retrieving it from the database. """
def get_prep_value(self, value):
""" Compress the text data. """
if value is not None:
if isinstance(value, unicode):
value = value.encode('utf8')
value = compress_string(value)
value = value.encode('base64').decode('utf8')
return value
def to_python(self, value):
""" Decompresses the value from the database. """
if isinstance(value, unicode):
value = decompress_string(value)
return value
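# --- Illustrative round-trip sketch (not part of the original module) ---
# Shows, under Python 2 semantics, that decompress_string() reverses
# CompressedTextField.get_prep_value(); the sample text is made up.
def _example_round_trip():
    field = CompressedTextField()
    original = u"some moderately long text " * 10
    stored = field.get_prep_value(original)   # gzip + base64, returned as unicode
    restored = field.to_python(stored)        # base64-decode + gunzip back
    assert restored == original
    return restored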
|
ahmedaljazzar/edx-platform
|
common/djangoapps/util/models.py
|
Python
|
agpl-3.0
| 1,962
| 0.001019
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_sip
short_description: BIG-IP ltm profile sip module
description:
- Configures a Session Initiation Protocol (SIP) profile.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
alg_enable:
description:
- Enables or disables the SIP ALG (Application Level Gateway) feature.
default: disabled
choices: ['disabled', 'enabled']
app_service:
description:
- Specifies the name of the application service to which the object belongs.
community:
description:
- Specifies the community to which you want to assign the virtual server that you associate with this
profile.
defaults_from:
description:
- Specifies the profile that you want to use as the parent profile.
default: sip
description:
description:
- User defined description.
dialog_aware:
description:
- Enables or disables the ability for the system to be aware of unauthorized use of the SIP dialog.
default: disabled
choices: ['disabled', 'enabled']
dialog_establishment_timeout:
description:
- Indicates the timeout value for dialog establishment in a sip session.
default: 10
enable_sip_firewall:
description:
- Indicates whether to enable SIP firewall functionality or not.
default: no
choices: ['no', 'yes']
insert_record_route_header:
description:
- Enables or disables the insertion of a Record-Route header, which indicates the next hop for the following
SIP request messages.
default: disabled
choices: ['disabled', 'enabled']
insert_via_header:
description:
- Enables or disables the insertion of a Via header, which indicates where the message originated.
default: disabled
choices: ['disabled', 'enabled']
log_profile:
description:
            - Specifies the name of the ALG log profile that controls ALG logging.
log_publisher:
description:
- Specify the name of the log publisher which logs translation events.
max_media_sessions:
description:
- Indicates the maximum number of SDP media sessions that the BIG-IP system accepts.
default: 6
max_registrations:
description:
            - Indicates the maximum number of registrations, that is, the maximum allowable number of REGISTER messages that the BIG-IP system accepts and records.
default: 100
max_sessions_per_registration:
description:
            - Indicates the maximum number of calls or sessions that a user can make for a single registration that the BIG-IP system accepts.
default: 50
max_size:
description:
- Specifies the maximum SIP message size that the BIG-IP system accepts.
default: 65535
name:
description:
- Specifies a unique name for the component.
required: true
partition:
description:
- Displays the administrative partition within which the component resides.
registration_timeout:
description:
- Indicates the timeout value for a sip registration.
default: 3600
rtp_proxy_style:
description:
- Indicates the style in which the RTP will proxy the data.
default: symmetric
choices: ['symmetric', 'restricted-by-ip-address', 'any-location']
secure_via_header:
description:
- Enables or disables the insertion of a Secure Via header, which indicates where the message originated.
default: disabled
choices: ['disabled', 'enabled']
security:
description:
- Enables or disables security for the SIP profile.
default: disabled
choices: ['disabled', 'enabled']
sip_session_timeout:
description:
- Indicates the timeout value for a sip session.
default: 300
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
terminate_on_bye:
description:
- Enables or disables the termination of a connection when a BYE transaction finishes.
default: enabled
choices: ['disabled', 'enabled']
user_via_header:
description:
- Enables or disables the insertion of a Via header specified by a system administrator.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Profile sip
f5bigip_ltm_profile_sip:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_sip_profile
partition: Common
description: My sip profile
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_POLAR_CHOICES
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
alg_enable=dict(type='str', choices=F5_ACTIVATION_CHOICES),
app_service=dict(type='str'),
community=dict(type='str'),
defaults_from=dict(type='str'),
description=dict(type='str'),
dialog_aware=dict(type='str', choices=F5_ACTIVATION_CHOICES),
dialog_establishment_timeout=dict(type='int'),
enable_sip_firewall=dict(type='str', choices=F5_POLAR_CHOICES),
insert_record_route_header=dict(type='str', choices=F5_ACTIVATION_CHOICES),
insert_via_header=dict(type='str', choices=F5_ACTIVATION_CHOICES),
log_profile=dict(type='str'),
log_publisher=dict(type='str'),
max_media_sessions=dict(type='int'),
max_registrations=dict(type='int'),
max_sessions_per_registration=dict(type='int'),
max_size=dict(type='int'),
registration_timeout=dict(type='int'),
rtp_proxy_style=dict(type='str', choices=['symmetric', 'restricted-by-ip-address', 'any-location']),
secure_via_header=dict(type='str', choices=F5_ACTIVATION_CHOICES),
security=dict(type='str', choices=F5_ACTIVATION_CHOICES),
sip_session_timeout=dict(type='int'),
terminate_on_bye=dict(type='str', choices=F5_ACTIVATION_CHOICES),
user_via_header=dict(type='str')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmProfileSip(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.profile.sips.sip.create,
'read': self._api.tm.ltm.profile.sips.sip.load,
'update': self._api.tm.ltm.profile.sips.sip.update,
'delete': self._api.tm.ltm.profile.sips.sip.delete,
'exists': self._api.tm.ltm.profile.sips.sip.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmProfileSip(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
|
erjac77/ansible-module-f5bigip
|
library/f5bigip_ltm_profile_sip.py
|
Python
|
apache-2.0
| 8,676
| 0.003112
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_auth
short_description: "Module to manage authentication to oVirt/RHV"
author: "Ondra Machacek (@machacekondra)"
version_added: "2.2"
description:
- "This module authenticates to oVirt/RHV engine and creates SSO token, which should be later used in
all other oVirt/RHV modules, so all modules don't need to perform login and logout.
This module returns an Ansible fact called I(ovirt_auth). Every module can use this
fact as C(auth) parameter, to perform authentication."
options:
state:
default: present
choices: ['present', 'absent']
description:
- "Specifies if a token should be created or revoked."
username:
required: False
description:
- "The name of the user. For example: I(admin@internal)
Default value is set by I(OVIRT_USERNAME) environment variable."
password:
required: False
description:
- "The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
token:
required: False
description:
- "SSO token to be used instead of login with username/password.
Default value is set by I(OVIRT_TOKEN) environment variable."
version_added: 2.5
url:
required: False
description:
- "A string containing the API URL of the server.
For example: I(https://server.example.com/ovirt-engine/api).
Default value is set by I(OVIRT_URL) environment variable."
- "Either C(url) or C(hostname) is required."
hostname:
required: False
description:
- "A string containing the hostname of the server.
For example: I(server.example.com).
Default value is set by I(OVIRT_HOSTNAME) environment variable."
- "Either C(url) or C(hostname) is required."
version_added: "2.6"
insecure:
required: False
description:
- "A boolean flag that indicates if the server TLS certificate and host name should be checked."
type: bool
ca_file:
required: False
description:
- "A PEM file containing the trusted CA certificates. The
certificate presented by the server will be verified using these CA
certificates. If C(ca_file) parameter is not set, system wide
CA certificate store is used.
Default value is set by I(OVIRT_CAFILE) environment variable."
timeout:
required: False
description:
- "The maximum total time to wait for the response, in
seconds. A value of zero (the default) means wait forever. If
the timeout expires before the response is received an exception
will be raised."
compress:
required: False
description:
- "A boolean flag indicating if the SDK should ask
the server to send compressed responses. The default is I(True).
Note that this is a hint for the server, and that it may return
uncompressed data even when this parameter is set to I(True)."
type: bool
kerberos:
required: False
description:
- "A boolean flag indicating if Kerberos authentication
should be used instead of the default basic authentication."
type: bool
headers:
required: False
description:
- "A dictionary of HTTP headers to be added to each API call."
version_added: "2.4"
requirements:
- python >= 2.7
- ovirt-engine-sdk-python >= 4.3.0
notes:
- "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket,
when you no longer need it, otherwise the ticket would be revoked by engine when it expires.
For an example of how to achieve that, please take a look at I(examples) section."
- "In order to use this module you have to install oVirt/RHV Python SDK.
To ensure it's installed with correct version you can create the following task:
I(pip: name=ovirt-engine-sdk-python version=4.3.0)"
- "Note that in oVirt/RHV 4.1 if you want to use a user which is not administrator
you must enable the I(ENGINE_API_FILTER_BY_DEFAULT) variable in engine. In
oVirt/RHV 4.2 and later it's enabled by default."
'''
EXAMPLES = '''
- block:
# Create a vault with `ovirt_password` variable which store your
# oVirt/RHV user's password, and include that yaml file with variable:
- include_vars: ovirt_password.yml
- name: Obtain SSO token with using username/password credentials
ovirt_auth:
url: https://ovirt.example.com/ovirt-engine/api
username: admin@internal
ca_file: ca.pem
password: "{{ ovirt_password }}"
# Previous task generated I(ovirt_auth) fact, which you can later use
# in different modules as follows:
- ovirt_vm:
auth: "{{ ovirt_auth }}"
state: absent
name: myvm
always:
- name: Always revoke the SSO token
ovirt_auth:
state: absent
ovirt_auth: "{{ ovirt_auth }}"
# When user will set following environment variables:
# OVIRT_URL = https://fqdn/ovirt-engine/api
# OVIRT_USERNAME = admin@internal
# OVIRT_PASSWORD = the_password
# User can login the oVirt using environment variable instead of variables
# in yaml file.
# This is mainly useful when using Ansible Tower or AWX, as it will work
# for Red Hat Virtualization credentials type.
- name: Obtain SSO token
ovirt_auth:
state: present
'''
RETURN = '''
ovirt_auth:
description: Authentication facts, needed to perform authentication to oVirt/RHV.
returned: success
type: complex
contains:
token:
description: SSO token which is used for connection to oVirt/RHV engine.
returned: success
type: str
sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw"
url:
description: URL of the oVirt/RHV engine API endpoint.
returned: success
type: str
sample: "https://ovirt.example.com/ovirt-engine/api"
ca_file:
description: CA file, which is used to verify SSL/TLS connection.
returned: success
type: str
sample: "ca.pem"
insecure:
description: Flag indicating if insecure connection is used.
returned: success
type: bool
sample: False
timeout:
description: Number of seconds to wait for response.
returned: success
type: int
sample: 0
compress:
description: Flag indicating if compression is used for connection.
returned: success
type: bool
sample: True
kerberos:
description: Flag indicating if kerberos is used for authentication.
returned: success
type: bool
sample: False
headers:
description: Dictionary of HTTP headers to be added to each API call.
returned: success
type: dict
'''
import os
import traceback
try:
import ovirtsdk4 as sdk
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import check_sdk
def main():
module = AnsibleModule(
argument_spec=dict(
url=dict(default=None),
hostname=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
ca_file=dict(default=None, type='path'),
insecure=dict(required=False, type='bool', default=None),
timeout=dict(required=False, type='int', default=0),
compress=dict(required=False, type='bool', default=True),
kerberos=dict(required=False, type='bool', default=False),
headers=dict(required=False, type='dict'),
state=dict(default='present', choices=['present', 'absent']),
token=dict(default=None, no_log=True),
ovirt_auth=dict(required=None, type='dict'),
),
required_if=[
('state', 'absent', ['ovirt_auth']),
],
supports_check_mode=True,
)
check_sdk(module)
state = module.params.get('state')
if state == 'present':
params = module.params
elif state == 'absent':
params = module.params['ovirt_auth']
def get_required_parameter(param, env_var, required=False):
var = params.get(param) or os.environ.get(env_var)
if not var and required and state == 'present':
module.fail_json(msg="'%s' is a required parameter." % param)
return var
url = get_required_parameter('url', 'OVIRT_URL', required=False)
hostname = get_required_parameter('hostname', 'OVIRT_HOSTNAME', required=False)
if url is None and hostname is None:
module.fail_json(msg="You must specify either 'url' or 'hostname'.")
if url is None and hostname is not None:
url = 'https://{0}/ovirt-engine/api'.format(hostname)
username = get_required_parameter('username', 'OVIRT_USERNAME')
password = get_required_parameter('password', 'OVIRT_PASSWORD')
token = get_required_parameter('token', 'OVIRT_TOKEN')
ca_file = get_required_parameter('ca_file', 'OVIRT_CAFILE')
insecure = params.get('insecure') if params.get('insecure') is not None else not bool(ca_file)
connection = sdk.Connection(
url=url,
username=username,
password=password,
ca_file=ca_file,
insecure=insecure,
timeout=params.get('timeout'),
compress=params.get('compress'),
kerberos=params.get('kerberos'),
headers=params.get('headers'),
token=token,
)
try:
token = connection.authenticate()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_auth=dict(
token=token,
url=url,
ca_file=ca_file,
insecure=insecure,
timeout=params.get('timeout'),
compress=params.get('compress'),
kerberos=params.get('kerberos'),
headers=params.get('headers'),
) if state == 'present' else dict()
)
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
# Close the connection, but don't revoke token
connection.close(logout=state == 'absent')
if __name__ == "__main__":
main()
|
2ndQuadrant/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_auth.py
|
Python
|
gpl-3.0
| 11,230
| 0.002048
|
"""
Application entry point
"""
def main():
pass
if __name__ == "__main__":
    # delegate to main_debug when it is importable, otherwise fall back to main()
try:
import main_debug
main_debug.main()
except ImportError:
main()
|
davidveen/nolava
|
src/main.py
|
Python
|
gpl-3.0
| 234
| 0.004274
|
"""
XING OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/xing.html
"""
from social.backends.oauth import BaseOAuth1
class XingOAuth(BaseOAuth1):
"""Xing OAuth authentication backend"""
name = 'xing'
AUTHORIZATION_URL = 'https://api.xing.com/v1/authorize'
REQUEST_TOKEN_URL = 'https://api.xing.com/v1/request_token'
ACCESS_TOKEN_URL = 'https://api.xing.com/v1/access_token'
SCOPE_SEPARATOR = '+'
EXTRA_DATA = [
('id', 'id'),
('user_id', 'user_id')
]
def get_user_details(self, response):
"""Return user details from Xing account"""
email = response.get('email', '')
fullname, first_name, last_name = self.get_user_names(
first_name=response['first_name'],
last_name=response['last_name']
)
return {'username': first_name + last_name,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name,
'email': email}
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
profile = self.get_json(
'https://api.xing.com/v1/users/me.json',
auth=self.oauth_auth(access_token)
)['users'][0]
return {
'user_id': profile['id'],
'id': profile['id'],
'first_name': profile['first_name'],
'last_name': profile['last_name'],
'email': profile['active_email']
}
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/social/backends/xing.py
|
Python
|
agpl-3.0
| 1,519
| 0
|
#!/usr/bin/env python
import pytest
from pyxenon_snippets import slurm_queues_getter_with_props
def test_slurm_queues_getter_with_props():
slurm_queues_getter_with_props.run_example()
|
NLeSC/Xenon-examples
|
readthedocs/code-tabs/python/tests/test_slurm_queues_getter_with_props.py
|
Python
|
apache-2.0
| 192
| 0.005208
|
"""
byceps.services.shop.article.dbmodels.article
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from decimal import Decimal
from typing import Optional
from sqlalchemy.ext.hybrid import hybrid_property
from .....database import db, generate_uuid
from .....util.instances import ReprBuilder
from ...shop.transfer.models import ShopID
from ..transfer.models import ArticleNumber, ArticleType
class Article(db.Model):
"""An article that can be bought."""
__tablename__ = 'shop_articles'
__table_args__ = (
db.UniqueConstraint('shop_id', 'description'),
db.CheckConstraint('available_from < available_until'),
)
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
shop_id = db.Column(db.UnicodeText, db.ForeignKey('shops.id'), index=True, nullable=False)
item_number = db.Column(db.UnicodeText, unique=True, nullable=False)
_type = db.Column('type', db.UnicodeText, nullable=False)
description = db.Column(db.UnicodeText, nullable=False)
price = db.Column(db.Numeric(6, 2), nullable=False)
tax_rate = db.Column(db.Numeric(3, 3), nullable=False)
available_from = db.Column(db.DateTime, nullable=True)
available_until = db.Column(db.DateTime, nullable=True)
total_quantity = db.Column(db.Integer, nullable=False)
quantity = db.Column(db.Integer, db.CheckConstraint('quantity >= 0'), nullable=False)
max_quantity_per_order = db.Column(db.Integer, nullable=False)
not_directly_orderable = db.Column(db.Boolean, default=False, nullable=False)
separate_order_required = db.Column(db.Boolean, default=False, nullable=False)
shipping_required = db.Column(db.Boolean, nullable=False)
def __init__(
self,
shop_id: ShopID,
item_number: ArticleNumber,
type_: ArticleType,
description: str,
price: Decimal,
tax_rate: Decimal,
total_quantity: int,
max_quantity_per_order: int,
shipping_required: bool,
*,
available_from: Optional[datetime] = None,
available_until: Optional[datetime] = None,
) -> None:
self.shop_id = shop_id
self.item_number = item_number
self._type = type_.name
self.description = description
self.price = price
self.tax_rate = tax_rate
self.available_from = available_from
self.available_until = available_until
self.total_quantity = total_quantity
self.quantity = total_quantity # Initialize with total quantity.
self.max_quantity_per_order = max_quantity_per_order
self.shipping_required = shipping_required
@hybrid_property
def type_(self) -> ArticleType:
return ArticleType[self._type]
def __repr__(self) -> str:
return ReprBuilder(self) \
.add_with_lookup('id') \
.add('shop', self.shop_id) \
.add_with_lookup('item_number') \
.add_with_lookup('description') \
.build()
|
homeworkprod/byceps
|
byceps/services/shop/article/dbmodels/article.py
|
Python
|
bsd-3-clause
| 3,124
| 0.00128
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
Cross-validated Lasso, using the LARS algorithm.
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
from numpy import finfo
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class LassoLarsCV(ScikitLearnBase):
"""
Cross-validated Lasso, using the LARS algorithm
"""
info = {'problemtype':'regression', 'normalize':False}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.linear_model
self.model = sklearn.linear_model.LassoLarsCV
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(LassoLarsCV, cls).getInputSpecification()
specs.description = r"""The \xmlNode{LassoLarsCV} (\textit{Cross-validated Lasso model fit with Least Angle Regression})
                        This model is an augmentation of the LassoLars model with the addition of
                        cross-validation techniques.
The optimization objective for Lasso is:
\begin{equation}
(1 / (2 * n\_samples)) * ||y - Xw||^2\_2 + alpha * ||w||\_1
\end{equation}
\zNormalizationNotPerformed{LassoLarsCV}
"""
specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
descr=r"""Whether the intercept should be estimated or not. If False,
the data is assumed to be already centered.""", default=True))
specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
descr=r"""The maximum number of iterations.""", default=500))
specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType,
descr=r"""This parameter is ignored when fit_intercept is set to False. If True,
the regressors X will be normalized before regression by subtracting the mean and
dividing by the l2-norm.""", default=True))
specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType,
descr=r"""Whether to use a precomputed Gram matrix to speed up calculations.
For sparse input this option is always True to preserve sparsity.""", default='auto'))
specs.addSub(InputData.parameterInputFactory("max_n_alphas", contentType=InputTypes.IntegerType,
descr=r"""The maximum number of points on the path used to compute the residuals in
the cross-validation""", default=1000))
specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType,
descr=r"""The machine-precision regularization in the computation of the Cholesky
diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol
parameter in some iterative optimization-based algorithms, this parameter does not
control the tolerance of the optimization.""", default=finfo(float).eps))
specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType,
descr=r"""When set to True, forces the coefficients to be positive.""", default=False))
specs.addSub(InputData.parameterInputFactory("cv", contentType=InputTypes.IntegerType,
descr=r"""Determines the cross-validation splitting strategy.
                                                 It specifies the number of folds.""", default=None))
specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType,
descr=r"""Amount of verbosity.""", default=False))
return specs
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['fit_intercept','max_iter', 'normalize', 'precompute',
'max_n_alphas','eps','positive','cv', 'verbose'])
# notFound must be empty
assert(not notFound)
self.initializeModel(settings)
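# --- Illustrative sketch (not part of RAVEN) ---
# The wrapper above delegates to scikit-learn; a minimal standalone use of the
# underlying estimator, on synthetic data invented for this example, looks like:
def _exampleLassoLarsCV():
  """
    Fits sklearn's LassoLarsCV on synthetic data and returns the selected alpha
    and the fitted coefficients.
    @ In, None
    @ Out, (alpha, coef), tuple, regularization strength and weight vector
  """
  import numpy as np
  from sklearn.linear_model import LassoLarsCV as SklLassoLarsCV
  rng = np.random.RandomState(0)
  X = rng.randn(100, 5)
  y = 3.0 * X[:, 0] - 2.0 * X[:, 2] + 0.1 * rng.randn(100)
  model = SklLassoLarsCV(cv=5).fit(X, y)
  return model.alpha_, model.coef_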
|
idaholab/raven
|
framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLarsCV.py
|
Python
|
apache-2.0
| 6,473
| 0.00896
|
import numpy as np
from bokeh.io import curdoc, show
from bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Triangle
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))
plot = Plot(
title=None, plot_width=300, plot_height=300,
min_border=0, toolbar_location=None)
glyph = Triangle(x="x", y="y", size="sizes", line_color="#99d594", line_width=2, fill_color=None)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
|
ericmjl/bokeh
|
examples/reference/models/Triangle.py
|
Python
|
bsd-3-clause
| 749
| 0.001335
|
#!/usr/bin/env python
import pynotify
'''
No purpose here other than creating a callable library for system notifications
'''
class message:
def __init__(self, messagex):
pynotify.init('EventCall')
m = pynotify.Notification("RSEvent Notification", "%s" % messagex)
m.show()
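# Illustrative usage (not part of the original file): constructing the class is
# enough to display a desktop notification; the message text below is made up.
if __name__ == '__main__':
    message("Hello from RSEvent")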
|
krazybean/message_agent_abandoned
|
lin/lin_notify_lib.py
|
Python
|
apache-2.0
| 282
| 0.01773
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Admin model views for records."""
import json
from flask import flash
from flask_admin.contrib.sqla import ModelView
from flask_babelex import gettext as _
from invenio_admin.filters import FilterConverter
from invenio_db import db
from markupsafe import Markup
from sqlalchemy.exc import SQLAlchemyError
from .api import Record
from .models import RecordMetadata
class RecordMetadataModelView(ModelView):
"""Records admin model view."""
filter_converter = FilterConverter()
can_create = False
can_edit = False
can_delete = True
can_view_details = True
column_list = ('id', 'version_id', 'updated', 'created',)
column_details_list = ('id', 'version_id', 'updated', 'created', 'json')
column_labels = dict(
id=_('UUID'),
version_id=_('Revision'),
json=_('JSON'),
)
column_formatters = dict(
version_id=lambda v, c, m, p: m.version_id-1,
json=lambda v, c, m, p: Markup("<pre>{0}</pre>".format(
json.dumps(m.json, indent=2, sort_keys=True)))
)
column_filters = ('created', 'updated', )
column_default_sort = ('updated', True)
page_size = 25
def delete_model(self, model):
"""Delete a record."""
try:
if model.json is None:
return True
record = Record(model.json, model=model)
record.delete()
db.session.commit()
except SQLAlchemyError as e:
if not self.handle_view_exception(e):
flash(_('Failed to delete record. %(error)s', error=str(e)),
category='error')
db.session.rollback()
return False
return True
record_adminview = dict(
modelview=RecordMetadataModelView,
model=RecordMetadata,
category=_('Records'))
|
tiborsimko/invenio-records
|
invenio_records/admin.py
|
Python
|
mit
| 2,051
| 0.000488
|
import hashlib as md5
class Palette:
def __init__(self, palette={}, colors=[]):
self.job_status_palette = {
'Received': '#D9E7F8',
'Checking': '#FAFAFA',
'Staging': '#6190CD',
'Waiting': '#004EFF',
'Matched': '#FEF7AA',
'Running': '#FDEE65',
'Stalled': '#BC5757',
'Completed': '#00FF21',
'Done': '#238802',
'Failed': '#FF0000',
'failed': '#FF0000',
'Killed': '#111111'
}
self.job_minor_status_palette = {
"AncestorDepth Not Found" : '#BAA312',
'Application Finished With Errors' : '#BC2133',
'BK Input Data Not Available' : '#E6D600',
'BK-LFC Integrity Check Failed' : '#BC1143',
'Can not get Active and Banned Sites from JobDB' : '#84CBFF',
'Chosen site is not eligible' : '#B4A243',
'Error Sending Staging Request' : '#B4A243',
'Exceeded Maximum Dataset Limit (100)' : '#BA5C9D',
'Exception During Execution' : '#AA240C',
'Execution Complete' : '#338B39',
'Failed to access database' : '#FFE267',
'File Catalog Access Failure' : '#FF8000',
'Illegal Job JDL' : '#D96C00',
'Impossible Site + InputData Requirement' : '#BDA822',
'Impossible Site Requirement' : '#F87500',
'Input Data Not Available' : '#2822A6',
'Input Data Resolution' : '#FFBE94',
'Input Sandbox Download' : '#586CFF',
'Input data contains //' : '#AB7800',
'Input data not correctly specified' : '#6812D6',
'Job Wrapper Initialization' : '#FFFFCC',
'Job has exceeded maximum wall clock time' : '#FF33CC',
'Job has insufficient disk space to continue' : '#33FFCC',
'Job has reached the CPU limit of the queue' : '#AABBCC',
'No Ancestors Found For Input Data' : '#BDA544',
'No candidate sites available' : '#E2FFBC',
'No eligible sites for job' : '#A8D511',
'Parameter not found' : '#FFB80C',
'Pending Requests' : '#52FF4F',
'Received Kill signal' : '#FF312F',
'Socket read timeout exceeded' : '#B400FE',
'Stalled' : '#FF655E',
'Uploading Job Outputs' : '#FE8420',
'Watchdog identified this job as stalled' : '#FFCC99'
}
self.miscelaneous_pallette = {
'Others': '#666666',
'NoLabels': '#0025AD',
'Total': '#00FFDC',
'Default': '#FDEE65'
}
self.country_palette = {
'France':'#73C6BC',
'UK':'#DCAF8A',
'Spain':'#C2B0E1',
'Netherlands':'#A9BF8E',
'Germany':'#800000',
'Russia':'#00514A',
'Italy':'#004F00',
'Switzerland':'#433B00',
'Poland':'#528220',
'Hungary':'#825CE2',
'Portugal':'#009182',
'Turkey':'#B85D00'
}
self.palette = self.country_palette
self.palette.update(self.job_status_palette)
self.palette.update(self.miscelaneous_pallette)
self.palette.update(self.job_minor_status_palette)
def setPalette(self, palette):
self.palette = palette
def setColor(self, label, color):
self.palette[label] = color
def addPalette(self, palette):
self.palette.update(palette)
def getColor(self, label):
if label in self.palette.keys():
return self.palette[label]
else:
return self.generateColor(label)
  def generateColor(self, label):
    # derive a stable colour from the label: hash it and take the first six
    # hex digits as an RGB value (None is hashed as the string "None")
    if label is None:
      label = str(label)
    digest = hashlib.md5(str(label).encode("utf-8")).hexdigest()
    return "#" + digest[:6]
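# Example (not part of the original file): a short usage sketch of the Palette
# class above -- known labels resolve from the built-in palettes, unknown labels
# get a deterministic colour derived from the MD5 hash of the label text.
if __name__ == "__main__":
  p = Palette()
  print(p.getColor("Running"))       # '#FDEE65', from the job status palette
  print(p.getColor("SomeNewLabel"))  # generated from the hash of the label
  p.setColor("SomeNewLabel", "#123456")
  print(p.getColor("SomeNewLabel"))  # '#123456' after the explicit override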
|
zmathe/WebAppDIRAC
|
WebApp/handler/Palette.py
|
Python
|
gpl-3.0
| 3,496
| 0.016304
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
orm.MonitorItem.objects.filter(pk='users_reported').delete()
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'misago.alert': {
'Meta': {'object_name': 'Alert'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']"}),
'variables': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'misago.ban': {
'Meta': {'object_name': 'Ban'},
'ban': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason_admin': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reason_user': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'test': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'misago.change': {
'Meta': {'object_name': 'Change'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'change': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Post']"}),
'post_content': ('django.db.models.fields.TextField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
'thread_name_new': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'thread_name_old': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.checkpoint': {
'Meta': {'object_name': 'Checkpoint'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'agent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'old_forum': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['misago.Forum']"}),
'old_forum_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'old_forum_slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}),
'target_user_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target_user_slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.fixture': {
'Meta': {'object_name': 'Fixture'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.forum': {
'Meta': {'object_name': 'Forum'},
'attrs': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_preparsed': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_poster': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}),
'last_poster_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_poster_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_poster_style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_thread': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Thread']"}),
'last_thread_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_thread_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_thread_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['misago.Forum']"}),
'posts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'posts_delta': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'prune_last': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'prune_start': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pruned_archive': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Forum']"}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'redirects': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'redirects_delta': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'show_details': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'special': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'threads': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'threads_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
'misago.forumread': {
'Meta': {'object_name': 'ForumRead'},
'cleared': ('django.db.models.fields.DateTimeField', [], {}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']"})
},
'misago.forumrole': {
'Meta': {'object_name': 'ForumRole'},
'_permissions': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'permissions'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.karma': {
'Meta': {'object_name': 'Karma'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Post']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.monitoritem': {
'Meta': {'object_name': 'MonitorItem'},
'_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'value'", 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'int'", 'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'misago.newsletter': {
'Meta': {'object_name': 'Newsletter'},
'content_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_plain': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignore_subscriptions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'progress': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ranks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['misago.Rank']", 'symmetrical': 'False'}),
'step_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'misago.post': {
'Meta': {'object_name': 'Post'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'current_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'downvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'edit_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'edit_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}),
'edit_user_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'edit_user_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'edits': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'mentions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mention_set'", 'symmetrical': 'False', 'to': "orm['misago.User']"}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.TextField', [], {}),
'post_preparsed': ('django.db.models.fields.TextField', [], {}),
'protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reported': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
'upvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.pruningpolicy': {
'Meta': {'object_name': 'PruningPolicy'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_visit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'posts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'registered': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'misago.rank': {
'Meta': {'object_name': 'Rank'},
'as_tab': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'criteria': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'on_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['misago.Role']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'misago.role': {
'Meta': {'object_name': 'Role'},
'_permissions': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'permissions'", 'blank': 'True'}),
'_special': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_column': "'special'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'misago.session': {
'Meta': {'object_name': 'Session'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'agent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'crawler': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'db_column': "'session_data'"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '42', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last': ('django.db.models.fields.DateTimeField', [], {}),
'matched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rank': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Rank']"}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"})
},
'misago.setting': {
'Meta': {'object_name': 'Setting'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.SettingsGroup']", 'to_field': "'key'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'normalize_to': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'separator': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'setting': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_default': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'misago.settingsgroup': {
'Meta': {'object_name': 'SettingsGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.signinattempt': {
'Meta': {'object_name': 'SignInAttempt'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'})
},
'misago.thread': {
'Meta': {'object_name': 'Thread'},
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'downvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.DateTimeField', [], {}),
'last_post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Post']"}),
'last_poster': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}),
'last_poster_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_poster_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_poster_style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'private_thread_set'", 'symmetrical': 'False', 'to': "orm['misago.User']"}),
'replies': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'replies_deleted': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'replies_moderated': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'replies_reported': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'report_for': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'report_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Post']"}),
'score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'start_post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Post']"}),
'start_poster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'start_poster_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_poster_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'start_poster_style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'misago.threadread': {
'Meta': {'object_name': 'ThreadRead'},
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']"})
},
'misago.token': {
'Meta': {'object_name': 'Token'},
'accessed': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '42', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'signin_tokens'", 'to': "orm['misago.User']"})
},
'misago.user': {
'Meta': {'object_name': 'User'},
'acl_key': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'activation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'alerts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'alerts_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'allow_pds': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'avatar_ban': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'avatar_ban_reason_admin': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'avatar_ban_reason_user': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'avatar_image': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'avatar_original': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'avatar_temp': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'email_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'following': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'follows': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'follows_set'", 'symmetrical': 'False', 'to': "orm['misago.User']"}),
'hide_activity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignores': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'ignores_set'", 'symmetrical': 'False', 'to': "orm['misago.User']"}),
'is_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'join_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'join_date': ('django.db.models.fields.DateTimeField', [], {}),
'join_ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'karma_given_n': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'karma_given_p': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'karma_n': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'karma_p': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'last_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'last_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
'last_post': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_search': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password_date': ('django.db.models.fields.DateTimeField', [], {}),
'posts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rank': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Rank']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'ranking': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'receive_newsletters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['misago.Role']", 'symmetrical': 'False'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'signature': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'signature_ban': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'signature_ban_reason_admin': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'signature_ban_reason_user': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'signature_preparsed': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subscribe_reply': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'subscribe_start': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'sync_pds': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'threads': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'utc'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'unread_pds': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'username_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'misago.usernamechange': {
'Meta': {'object_name': 'UsernameChange'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'old_username': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'namechanges'", 'to': "orm['misago.User']"})
},
'misago.watchedthread': {
'Meta': {'object_name': 'WatchedThread'},
'email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read': ('django.db.models.fields.DateTimeField', [], {}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']"})
}
}
complete_apps = ['misago']
symmetrical = True
|
tanglu-org/tgl-misago
|
misago/migrations/0015_remove_users_reported.py
|
Python
|
gpl-3.0
| 33,856
| 0.008034
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import connection
from django.utils.translation import ugettext as _
from modoboa.lib.exceptions import InternalError
def db_table_exists(table):
"""Check if table exists."""
return table in connection.introspection.table_names()
def db_type(cname="default"):
    """Return the type of the given database connection.
    Supported values: 'postgres', 'mysql', 'sqlite'
    :param str cname: connection name
    :return: a string or None
    """
    if cname not in settings.DATABASES:
        raise InternalError(
            _("Connection to database %s not configured") % cname)
    for t in ["postgres", "mysql", "sqlite"]:
        if t in settings.DATABASES[cname]["ENGINE"]:
            return t
    return None
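# Example (not part of the original file): a hedged usage sketch of the helpers
# above, e.g. to branch on backend-specific behaviour once Django settings are
# configured; the table name checked below is purely illustrative.
def example_backend_hint():
    backend = db_type()  # 'postgres', 'mysql', 'sqlite' or None
    if backend == "sqlite" and not db_table_exists("lib_parameter"):
        return "apply migrations before continuing"
    return backend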
|
tonioo/modoboa
|
modoboa/lib/db_utils.py
|
Python
|
isc
| 848
| 0
|
from base import *
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Broken header entry III"
self.expected_error = 200
self.request = "GET / HTTP/1.0\r\n" +\
"Entry:value\r\n"
|
lmcro/webserver
|
qa/014-Broken-Key3.py
|
Python
|
gpl-2.0
| 294
| 0.013605
|
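# Example (not part of the original 014-Broken-Key3.py QA test above): a
# standalone sketch that sends the same malformed request over a raw socket to
# observe the server's response outside the QA framework. Host and port are
# assumptions, and a terminating blank line is appended here so the server sees
# the end of the header block (the QA framework may handle that itself).
import socket
def send_broken_header(host="localhost", port=80):
    raw = b"GET / HTTP/1.0\r\nEntry:value\r\n\r\n"
    with socket.create_connection((host, port), timeout=5) as conn:
        conn.sendall(raw)
        return conn.recv(4096)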
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import sys
import re
import stat
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.dir import InventoryDirectory, get_file_parser
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.plugins import vars_loader
from ansible.utils.vars import combine_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Inventory(object):
"""
Host inventory for ansible.
"""
def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
        # the host file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
self._loader = loader
self._variable_manager = variable_manager
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
self._groups_list = {}
self._pattern_cache = {}
self._vars_plugins = []
self._groups_cache = {}
# to be set by calling set_playbook_basedir by playbook code
self._playbook_basedir = None
# the inventory object holds a list of groups
self.groups = []
        # a list of host(names) used to constrain the current inquiries
self._restriction = None
self._subset = None
self.parse_inventory(host_list)
def parse_inventory(self, host_list):
if isinstance(host_list, basestring):
if "," in host_list:
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
if host_list is None:
self.parser = None
elif isinstance(host_list, list):
self.parser = None
all = Group('all')
self.groups = [ all ]
            ipv6_re = re.compile(r'\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
for x in host_list:
m = ipv6_re.match(x)
if m:
all.add_host(Host(m.groups()[0], m.groups()[1]))
else:
if ":" in x:
tokens = x.rsplit(":", 1)
# if there is ':' in the address, then this is an ipv6
if ':' in tokens[0]:
all.add_host(Host(x))
else:
all.add_host(Host(tokens[0], tokens[1]))
else:
all.add_host(Host(x))
elif os.path.exists(host_list):
            # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
else:
self.parser = get_file_parser(host_list, self._loader)
vars_loader.add_directory(self.basedir(), with_subdir=True)
if self.parser:
self.groups = self.parser.groups.values()
else:
# should never happen, but JIC
raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
self._vars_plugins = [ x for x in vars_loader.all(self) ]
# FIXME: shouldn't be required, since the group/host vars file
# management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
for group in self.groups:
group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
def _match(self, str, pattern_str):
try:
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
except Exception as e:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
def _match_list(self, items, item_attr, pattern_str):
results = []
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception as e:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
for item in items:
if pattern.match(getattr(item, item_attr)):
results.append(item)
return results
def _split_pattern(self, pattern):
"""
takes e.g. "webservers[0:5]:dbservers:others"
and returns ["webservers[0:5]", "dbservers", "others"]
"""
term = re.compile(
r'''(?: # We want to match something comprising:
[^:\[\]] # (anything other than ':', '[', or ']'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)* # repeated as many times as possible
''', re.X
)
return [x for x in term.findall(pattern) if x]
def get_hosts(self, pattern="all", ignore_limits_and_restrictions=False):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
# Enumerate all hosts matching the given pattern (which may be
# either a list of patterns or a string like 'pat1:pat2').
if isinstance(pattern, list):
pattern = ':'.join(pattern)
if ';' in pattern or ',' in pattern:
display.deprecated("Use ':' instead of ',' or ';' to separate host patterns", version=2.0, removed=True)
patterns = self._split_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits_and_restrictions:
# exclude hosts not in a subset, if defined
if self._subset:
subset = self._evaluate_patterns(self._subset)
hosts = [ h for h in hosts if h in subset ]
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
hosts = [ h for h in hosts if h in self._restriction ]
return hosts
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
taking into account any negative and intersection patterns.
"""
# Host specifiers should be sorted to ensure consistent behavior
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
        # if only exclusion and/or intersection patterns were given,
        # fall back to 'all' as the regular pattern so they still apply
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
patterns = pattern_regular + pattern_intersection + pattern_exclude
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._hosts_cache:
hosts.append(self.get_host(p))
else:
that = self._match_one_pattern(p)
if p.startswith("!"):
hosts = [ h for h in hosts if h not in that ]
elif p.startswith("&"):
hosts = [ h for h in hosts if h in that ]
else:
to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ]
hosts.extend(to_append)
return hosts
def _match_one_pattern(self, pattern):
"""
Takes a single pattern and returns a list of matching host names.
Ignores intersection (&) and exclusion (!) specifiers.
The pattern may be:
1. A regex starting with ~, e.g. '~[abc]*'
2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo'
3. An ordinary word that matches itself only, e.g. 'foo'
The pattern is matched using the following rules:
1. If it's 'all', it matches all hosts in all groups.
2. Otherwise, for each known group name:
(a) if it matches the group name, the results include all hosts
in the group or any of its children.
(b) otherwise, if it matches any hosts in the group, the results
include the matching hosts.
This means that 'foo*' may match one or more groups (thus including all
hosts therein) but also hosts in other groups.
The built-in groups 'all' and 'ungrouped' are special. No pattern can
match these group names (though 'all' behaves as though it matches, as
described above). The word 'ungrouped' can match a host of that name,
and patterns like 'ungr*' and 'al*' can match either hosts or groups
other than all and ungrouped.
If the pattern matches one or more group names according to these rules,
it may have an optional range suffix to select a subset of the results.
This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
would work if 'foo*' matched the name of one or more groups.
Duplicate matches are always eliminated from the results.
"""
if pattern.startswith("&") or pattern.startswith("!"):
pattern = pattern[1:]
if pattern not in self._pattern_cache:
(expr, slice) = self._split_subscript(pattern)
hosts = self._enumerate_matches(expr)
try:
hosts = self._apply_subscript(hosts, slice)
except IndexError:
raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
self._pattern_cache[pattern] = hosts
return self._pattern_cache[pattern]
def _split_subscript(self, pattern):
"""
Takes a pattern, checks if it has a subscript, and returns the pattern
without the subscript and a (start,end) tuple representing the given
subscript (or None if there is no subscript).
Validates that the subscript is in the right syntax, but doesn't make
sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# We want a pattern followed by an integer or range subscript.
# (We can't be more restrictive about the expression because the
# fnmatch semantics permit [\[:\]] to occur.)
pattern_with_subscript = re.compile(
r'''^
(.+) # A pattern expression ending with...
\[(?: # A [subscript] expression comprising:
(-?[0-9]+) # A single positive or negative number
| # Or a numeric range
([0-9]+)([:-])([0-9]+)
)\]
$
''', re.X
)
subscript = None
m = pattern_with_subscript.match(pattern)
if m:
(pattern, idx, start, sep, end) = m.groups()
if idx:
subscript = (int(idx), None)
else:
subscript = (int(start), int(end))
if sep == '-':
display.deprecated("Use [x:y] inclusive subscripts instead of [x-y]", version=2.0, removed=True)
return (pattern, subscript)
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
return hosts[start:end+1]
else:
return [ hosts[start] ]
def _enumerate_matches(self, pattern):
"""
Returns a list of host names matching the given pattern according to the
rules explained above in _match_one_pattern.
"""
results = []
hosts = []
hostnames = set()
def __append_host_to_results(host):
if host.name not in hostnames:
hostnames.add(host.name)
results.append(host)
groups = self.get_groups()
for group in groups:
if pattern == 'all':
for host in group.get_hosts():
__append_host_to_results(host)
else:
if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'):
for host in group.get_hosts():
__append_host_to_results(host)
else:
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
for host in matching_hosts:
__append_host_to_results(host)
if pattern in C.LOCALHOST and len(results) == 0:
new_host = self._create_implicit_localhost(pattern)
results.append(new_host)
return results
def _create_implicit_localhost(self, pattern):
new_host = Host(pattern)
new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local")
new_host.ipv4_address = '127.0.0.1'
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
ungrouped = self.get_group('ungrouped')
self.get_group('all').add_child_group(ungrouped)
ungrouped.add_host(new_host)
return new_host
def clear_pattern_cache(self):
''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
self._pattern_cache = {}
def groups_for_host(self, host):
if host in self._hosts_cache:
return self._hosts_cache[host].get_groups()
else:
return []
def groups_list(self):
if not self._groups_list:
groups = {}
for g in self.groups:
groups[g.name] = [h.name for h in g.get_hosts()]
ancestors = g.get_ancestors()
for a in ancestors:
if a.name not in groups:
groups[a.name] = [h.name for h in a.get_hosts()]
self._groups_list = groups
self._groups_cache = {}
return self._groups_list
def get_groups(self):
return self.groups
def get_host(self, hostname):
if hostname not in self._hosts_cache:
self._hosts_cache[hostname] = self._get_host(hostname)
if hostname in C.LOCALHOST:
for host in C.LOCALHOST.difference((hostname,)):
self._hosts_cache[host] = self._hosts_cache[hostname]
return self._hosts_cache[hostname]
def _get_host(self, hostname):
if hostname in C.LOCALHOST:
for host in self.get_group('all').get_hosts():
if host.name in C.LOCALHOST:
return host
return self._create_implicit_localhost(hostname)
matching_host = None
for group in self.groups:
for host in group.get_hosts():
if hostname == host.name:
matching_host = host
self._hosts_cache[host.name] = host
return matching_host
def get_group(self, groupname):
if not self._groups_cache:
for group in self.groups:
self._groups_cache[group.name] = group
return self._groups_cache.get(groupname)
def get_group_variables(self, groupname, update_cached=False, vault_password=None):
if groupname not in self._vars_per_group or update_cached:
self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password)
return self._vars_per_group[groupname]
def _get_group_variables(self, groupname, vault_password=None):
group = self.get_group(groupname)
if group is None:
raise Exception("group not found: %s" % groupname)
vars = {}
# plugin.get_group_vars retrieves just vars for specific group
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# Read group_vars/ files
vars = combine_vars(vars, self.get_group_vars(group))
return vars
def get_vars(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
raise AnsibleError("no vars as host is not in inventory: %s" % hostname)
return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
if hostname not in self._vars_per_host or update_cached:
self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password)
return self._vars_per_host[hostname]
def _get_host_variables(self, hostname, vault_password=None):
host = self.get_host(hostname)
if host is None:
raise AnsibleError("no host vars as host is not in inventory: %s" % hostname)
vars = {}
# plugin.run retrieves all vars (also from groups) for host
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
vars = combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files
vars = combine_vars(vars, self.get_host_vars(host))
return vars
def add_group(self, group):
if group.name not in self.groups_list():
self.groups.append(group)
self._groups_list = None # invalidate internal cache
self._groups_cache = {}
else:
raise AnsibleError("group already in inventory: %s" % group.name)
def list_hosts(self, pattern="all"):
""" return a list of hostnames for a pattern """
result = [ h for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in C.LOCALHOST:
result = [pattern]
return result
def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x)
def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to batch serial operations in main playbook code, don't use this for other
reasons.
"""
if restriction is None:
return
elif not isinstance(restriction, list):
restriction = [ restriction ]
self._restriction = restriction
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
        pattern, such as to select a given geographic or numeric slice amongst
        a previous 'hosts' selection that only selects roles, or vice versa.
Corresponds to --limit parameter to ansible-playbook
"""
if subset_pattern is None:
self._subset = None
else:
if ';' in subset_pattern or ',' in subset_pattern:
display.deprecated("Use ':' instead of ',' or ';' to separate host patterns", version=2.0, removed=True)
subset_patterns = self._split_pattern(subset_pattern)
results = []
# allow Unix style @filename data
for x in subset_patterns:
if x.startswith("@"):
fd = open(x[1:])
results.extend(fd.read().split("\n"))
fd.close()
else:
results.append(x)
self._subset = results
def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def is_file(self):
""" did inventory come from a file? """
if not isinstance(self.host_list, basestring):
return False
return os.path.exists(self.host_list)
def basedir(self):
""" if inventory came from a file, what's the directory? """
dname = self.host_list
if not self.is_file():
dname = None
elif os.path.isdir(self.host_list):
dname = self.host_list
else:
dname = os.path.dirname(self.host_list)
if dname is None or dname == '' or dname == '.':
cwd = os.getcwd()
dname = cwd
if dname:
dname = os.path.abspath(dname)
return dname
def src(self):
""" if inventory came from a file, what's the directory and file name? """
if not self.is_file():
return None
return self.host_list
def playbook_basedir(self):
""" returns the directory of the current playbook """
return self._playbook_basedir
def set_playbook_basedir(self, dir_name):
"""
sets the base directory of the playbook so inventory can use it as a
basedir for host_ and group_vars, and other things.
"""
# Only update things if dir is a different playbook basedir
if dir_name != self._playbook_basedir:
self._playbook_basedir = dir_name
# get group vars from group_vars/ files
# FIXME: excluding the new_pb_basedir directory may result in group_vars
# files loading more than they should, however with the file caching
# we do this shouldn't be too much of an issue. Still, this should
# be fixed at some point to allow a "first load" to touch all of the
# directories, then later runs only touch the new basedir specified
for group in self.groups:
#group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
group.vars = combine_vars(group.vars, self.get_group_vars(group))
# get host vars from host_vars/ files
for host in self.get_hosts():
#host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
host.vars = combine_vars(host.vars, self.get_host_vars(host))
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
def get_host_vars(self, host, new_pb_basedir=False):
""" Read host_vars/ files """
return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir)
def get_group_vars(self, group, new_pb_basedir=False):
""" Read group_vars/ files """
return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir)
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False):
"""
Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
to the inventory base directory or in the same directory as the playbook. Variables in the playbook
dir will win over the inventory dir if files are in both.
"""
results = {}
scan_pass = 0
_basedir = self.basedir()
# look in both the inventory base directory and the playbook base directory
# unless we do an update for a new playbook base dir
if not new_pb_basedir:
basedirs = [_basedir, self._playbook_basedir]
else:
basedirs = [self._playbook_basedir]
for basedir in basedirs:
display.debug('getting vars from %s' % basedir)
# this can happen from particular API usages, particularly if not run
# from /usr/bin/ansible-playbook
if basedir is None:
basedir = './'
scan_pass = scan_pass + 1
            # it's not an error if the directory does not exist, keep moving
if not os.path.exists(basedir):
continue
# save work of second scan if the directories are the same
if _basedir == self._playbook_basedir and scan_pass != 1:
continue
# FIXME: these should go to VariableManager
if group and host is None:
# load vars in dir/group_vars/name_of_group
base_path = os.path.realpath(os.path.join(basedir, "group_vars/%s" % group.name))
results = self._variable_manager.add_group_vars_file(base_path, self._loader)
elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host
base_path = os.path.realpath(os.path.join(basedir, "host_vars/%s" % host.name))
results = self._variable_manager.add_host_vars_file(base_path, self._loader)
# all done, results is a dictionary of variables for this particular host.
return results
def refresh_inventory(self):
self.clear_pattern_cache()
self._hosts_cache = {}
self._vars_per_host = {}
self._vars_per_group = {}
self._groups_list = {}
self._groups_cache = {}
self.groups = []
self.parse_inventory(self.host_list)
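# Example (not part of the original file): a hedged sketch of driving this
# Inventory class directly, in the style of the Ansible 2.0/2.1 Python API.
# The DataLoader/VariableManager import paths and the set_inventory() call are
# assumptions tied to that era of the codebase; the inventory path is illustrative.
def example_build_inventory(host_list='/etc/ansible/hosts'):
    from ansible.parsing.dataloader import DataLoader
    from ansible.vars import VariableManager
    loader = DataLoader()
    variable_manager = VariableManager()
    inventory = Inventory(loader=loader, variable_manager=variable_manager,
                          host_list=host_list)
    variable_manager.set_inventory(inventory)
    # pattern syntax as documented in get_hosts()/_match_one_pattern above
    return inventory.list_hosts("webservers[0:5]:!excluded")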
|
mcltn/ansible
|
lib/ansible/inventory/__init__.py
|
Python
|
gpl-3.0
| 28,850
| 0.00357
|
"""Library for performing speech recognition with the Google Speech Recognition API."""
__author__ = 'Anthony Zhang (Uberi)'
__version__ = '1.0.4'
__license__ = 'BSD'
import io, subprocess, wave, shutil
import math, audioop, collections
import json, urllib.request
# WIP: filter out clicks and other segments that are too short
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
try:
import pyaudio
class Microphone(AudioSource):
def __init__(self, device_index = None):
self.device_index = device_index
self.format = pyaudio.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = pyaudio.get_sample_size(self.format)
self.RATE = 16000 # sampling rate in Hertz
self.CHANNELS = 1 # mono audio
self.CHUNK = 1024 # number of frames stored in each buffer
self.audio = None
self.stream = None
def __enter__(self):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(
input_device_index = self.device_index,
format = self.format, rate = self.RATE, channels = self.CHANNELS, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio.terminate()
except ImportError:
pass
class WavFile(AudioSource):
def __init__(self, filename_or_fileobject):
if isinstance(filename_or_fileobject, str):
self.filename = filename_or_fileobject
else:
self.filename = None
self.wav_file = filename_or_fileobject
self.stream = None
def __enter__(self):
if self.filename: self.wav_file = open(self.filename, "rb")
self.wav_reader = wave.open(self.wav_file, "rb")
self.SAMPLE_WIDTH = self.wav_reader.getsampwidth()
self.RATE = self.wav_reader.getframerate()
self.CHANNELS = self.wav_reader.getnchannels()
assert self.CHANNELS == 1 # audio must be mono
self.CHUNK = 4096
self.stream = WavFile.WavStream(self.wav_reader)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.filename: self.wav_file.close()
self.stream = None
class WavStream(object):
def __init__(self, wav_reader):
self.wav_reader = wav_reader
def read(self, size = -1):
if size == -1:
return self.wav_reader.readframes(self.wav_reader.getnframes())
return self.wav_reader.readframes(size)
class AudioData(object):
def __init__(self, rate, data):
self.rate = rate
self.data = data
class Recognizer(AudioSource):
def __init__(self, language = "fr-FR", key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"):
self.key = key
self.language = language
self.energy_threshold = 1500 # minimum audio energy to consider for recording
self.pause_threshold = 0.8 # seconds of quiet time before a phrase is considered complete
self.quiet_duration = 0.5 # amount of quiet time to keep on both sides of the recording
def samples_to_flac(self, source, frame_data):
import platform, os
with io.BytesIO() as wav_file:
with wave.open(wav_file, "wb") as wav_writer:
wav_writer.setsampwidth(source.SAMPLE_WIDTH)
wav_writer.setnchannels(source.CHANNELS)
wav_writer.setframerate(source.RATE)
wav_writer.writeframes(frame_data)
wav_data = wav_file.getvalue()
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
if shutil.which("flac") is not None: # check for installed version first
flac_converter = shutil.which("flac")
elif system == "Windows" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}: # Windows NT, use the bundled FLAC conversion utility
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}:
flac_converter = os.path.join(path, "flac-linux-i386")
else:
raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC utility")
process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
flac_data, stderr = process.communicate(wav_data)
return flac_data
def record(self, source, duration = None):
assert isinstance(source, AudioSource) and source.stream
frames = io.BytesIO()
seconds_per_buffer = source.CHUNK / source.RATE
elapsed_time = 0
while True: # loop for the total number of chunks needed
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def listen(self, source, timeout = None):
assert isinstance(source, AudioSource) and source.stream
# record audio data as raw samples
frames = collections.deque()
assert self.pause_threshold >= self.quiet_duration >= 0
seconds_per_buffer = source.CHUNK / source.RATE
pause_buffer_count = math.ceil(self.pause_threshold / seconds_per_buffer) # number of buffers of quiet audio before the phrase is complete
quiet_buffer_count = math.ceil(self.quiet_duration / seconds_per_buffer) # maximum number of buffers of quiet audio to retain before and after
elapsed_time = 0
# store audio input until the phrase starts
while True:
# handle timeout if specified
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout:
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has stopped being quiet
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
break
if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers
frames.popleft()
# read audio input until the phrase ends
pause_count = 0
while True:
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has gone quiet for longer than the pause threshold
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# obtain frame data
for i in range(quiet_buffer_count, pause_buffer_count): frames.pop() # remove extra quiet frames at the end
frame_data = b"".join(list(frames))
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def recognize(self, audio_data, show_all = False):
assert isinstance(audio_data, AudioData)
url = "http://www.google.com/speech-api/v2/recognize?client=chromium&lang=%s&key=%s" % (self.language, self.key)
self.request = urllib.request.Request(url, data = audio_data.data, headers = {"Content-Type": "audio/x-flac; rate=%s" % audio_data.rate})
# check for invalid key response from the server
try:
response = urllib.request.urlopen(self.request)
except:
raise KeyError("Server wouldn't respond (invalid key or quota has been maxed out)")
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
# make sure we have a list of transcriptions
if "alternative" not in actual_result:
raise LookupError("Speech is unintelligible")
# return the best guess unless told to do otherwise
if not show_all:
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
return prediction["transcript"]
raise LookupError("Speech is unintelligible")
spoken_text = []
# check to see if Google thinks it's 100% correct
default_confidence = 0
if len(actual_result["alternative"])==1: default_confidence = 1
# return all the possibilities
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
spoken_text.append({"text":prediction["transcript"],"confidence":prediction["confidence"]})
else:
spoken_text.append({"text":prediction["transcript"],"confidence":default_confidence})
return spoken_text
if __name__ == "__main__":
r = Recognizer()
m = Microphone()
while True:
print("Say something!")
with m as source:
audio = r.listen(source)
print("Got it! Now to recognize it...")
try:
print("You said " + r.recognize(audio))
except LookupError:
print("Oops! Didn't catch that")
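# --- Editor's note: illustrative sketch, not part of the original module. The
# __main__ demo above uses Microphone; recognizing a pre-recorded mono WAV file
# with the WavFile source would look like this ("speech_sample.wav" is a
# hypothetical file name):
#
#     r = Recognizer(language="en-US")
#     with WavFile("speech_sample.wav") as source:
#         audio = r.record(source)          # read the whole file into an AudioData object
#     try:
#         print(r.recognize(audio))         # best transcription
#     except LookupError:
#         print("Speech was unintelligible")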
|
bizalu/sImulAcre
|
core/lib/speech_recognition/__init__.py
|
Python
|
gpl-2.0
| 10,485
| 0.010205
|
#!/usr/bin/env python3
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'models/research/slim'))
import time
import datetime
import logging
from tqdm import tqdm
import numpy as np
import cv2
import simplejson as json
from sklearn.metrics import accuracy_score, roc_auc_score
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils #import get_network_fn
import picpac
def patch_arg_scopes ():
def resnet_arg_scope (weight_decay=0.0001):
print_red("Patching resnet_v2 arg_scope when training from scratch")
return resnet_utils.resnet_arg_scope(weight_decay=weight_decay,
batch_norm_decay=0.9,
batch_norm_epsilon=5e-4,
batch_norm_scale=False)
nets_factory.arg_scopes_map['resnet_v1_50'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v1_101'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v1_152'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v1_200'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_50'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_101'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_152'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_200'] = resnet_arg_scope
pass
augments = None
#from . config import *
#if os.path.exists('config.py'):
def print_red (txt):
print('\033[91m' + txt + '\033[0m')
def print_green (txt):
print('\033[92m' + txt + '\033[0m')
print(augments)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('db', None, 'training db')
flags.DEFINE_string('val_db', None, 'validation db')
flags.DEFINE_integer('classes', 2, 'number of classes')
flags.DEFINE_string('mixin', None, 'mix-in training db')
flags.DEFINE_integer('size', None, '')
flags.DEFINE_integer('batch', 1, 'Batch size. ')
flags.DEFINE_integer('channels', 3, '')
flags.DEFINE_integer('shift', 0, '')
flags.DEFINE_integer('stride', 16, '')
flags.DEFINE_integer('max_size', 2000, '')
flags.DEFINE_boolean('cache', True, '')
flags.DEFINE_integer('picpac_dump', 0, '')
flags.DEFINE_string('augments', None, 'augment config file')
flags.DEFINE_string('backbone', 'resnet_v2_50', 'architecture')
flags.DEFINE_string('model', None, 'model directory')
flags.DEFINE_string('resume', None, 'resume training from this model')
flags.DEFINE_string('finetune', None, '')
flags.DEFINE_integer('max_to_keep', 100, '')
# optimizer settings
flags.DEFINE_float('lr', 0.01, 'Initial learning rate.')
flags.DEFINE_float('decay_rate', 0.95, '')
flags.DEFINE_float('decay_steps', 500, '')
flags.DEFINE_float('weight_decay', 0.00004, '')
#
flags.DEFINE_integer('epoch_steps', None, '')
flags.DEFINE_integer('max_epochs', 20000, '')
flags.DEFINE_integer('ckpt_epochs', 10, '')
flags.DEFINE_integer('val_epochs', 10, '')
flags.DEFINE_boolean('adam', False, '')
flags.DEFINE_boolean('vgg', False, '')
COLORSPACE = 'BGR'
PIXEL_MEANS = tf.constant([[[[127.0, 127.0, 127.0]]]])
PIXEL_MEANS1 = tf.constant([[[[127.0]]]])
VGG_PIXEL_MEANS = tf.constant([[[[103.94, 116.78, 123.68]]]])
def fcn_loss (logits, labels):
logits = tf.reshape(logits, (-1, FLAGS.classes))
labels = tf.reshape(labels, (-1,))
# cross-entropy
xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
xe = tf.reduce_mean(xe, name='xe')
# accuracy
acc = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32)
acc = tf.reduce_mean(acc, name='acc')
# regularization
reg = tf.reduce_sum(tf.losses.get_regularization_losses())
reg = tf.identity(reg, name='re')
# loss
loss = tf.identity(xe + reg, name='lo')
return loss, [acc, xe, reg, loss]
def setup_finetune (ckpt, exclusions):
print("Finetuning %s" % ckpt)
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
print("Excluding %s" % var.op.name)
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(ckpt):
ckpt = tf.train.latest_checkpoint(ckpt)
variables_to_train = []
for scope in exclusions:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
print("Training %d out of %d variables" % (len(variables_to_train), len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))))
if len(variables_to_train) < 10:
for var in variables_to_train:
print(" %s" % var.op.name)
return slim.assign_from_checkpoint_fn(
ckpt, variables_to_restore,
ignore_missing_vars=False), variables_to_train
def create_picpac_stream (db_path, is_training):
assert os.path.exists(db_path)
augments = []
if is_training:
if FLAGS.augments:
with open(FLAGS.augments, 'r') as f:
augments = json.loads(f.read())
print("Using augments:")
print(json.dumps(augments))
else:
augments = [
#{"type": "augment.flip", "horizontal": True, "vertical": False},
{"type": "augment.rotate", "min":-10, "max":10},
{"type": "augment.scale", "min":0.9, "max":1.1},
{"type": "augment.add", "range":20},
]
else:
augments = []
config = {"db": db_path,
"loop": is_training,
"shuffle": is_training,
"reshuffle": is_training,
"annotate": True,
"channels": FLAGS.channels,
"stratify": is_training,
"dtype": "float32",
"batch": FLAGS.batch,
"colorspace": COLORSPACE,
"cache": FLAGS.cache,
"dump": FLAGS.picpac_dump,
"transforms": augments + [
{"type": "resize", "max_size": FLAGS.max_size},
{"type": "clip", "round": FLAGS.stride},
{"type": "rasterize"},
]
}
if is_training and not FLAGS.mixin is None:
print("mixin support is incomplete in new picpac.")
assert os.path.exists(FLAGS.mixin)
config['mixin'] = FLAGS.mixin
config['mixin_group_reset'] = 0
config['mixin_group_delta'] = 1
pass
return picpac.ImageStream(config)
def main (_):
    global COLORSPACE  # without this, the finetune/vgg branch below would only set a local and leave the picpac stream in BGR
    global PIXEL_MEANS
    global PIXEL_MEANS1
if FLAGS.channels == 1:
PIXEL_MEANS = PIXEL_MEANS1
logging.basicConfig(filename='train-%s-%s.log' % (FLAGS.backbone, datetime.datetime.now().strftime('%Y%m%d-%H%M%S')),level=logging.DEBUG, format='%(asctime)s %(message)s')
if FLAGS.model:
try:
os.makedirs(FLAGS.model)
except:
pass
if FLAGS.finetune or FLAGS.vgg:
print_red("finetune, using RGB with vgg pixel means")
COLORSPACE = 'RGB'
PIXEL_MEANS = VGG_PIXEL_MEANS
X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
# ground truth labels
Y = tf.placeholder(tf.int32, shape=(None, None, None, 1), name="labels")
is_training = tf.placeholder(tf.bool, name="is_training")
if not FLAGS.finetune:
patch_arg_scopes()
#with \
# slim.arg_scope([slim.batch_norm], decay=0.9, epsilon=5e-4):
network_fn = nets_factory.get_network_fn(FLAGS.backbone, num_classes=None,
weight_decay=FLAGS.weight_decay, is_training=is_training)
ft, _ = network_fn(X-PIXEL_MEANS, global_pool=False, output_stride=16)
FLAGS.stride = 16
with tf.variable_scope('head'):
logits = slim.conv2d_transpose(ft, FLAGS.classes, 32, 16)
logits = tf.identity(logits, name='logits')
# probability of class 1 -- not very useful if FLAGS.classes > 2
probs = tf.squeeze(tf.slice(tf.nn.softmax(logits), [0,0,0,1], [-1,-1,-1,1]), 3)
loss, metrics = fcn_loss(logits, Y)
metric_names = [x.name[:-2] for x in metrics]
def format_metrics (avg):
return ' '.join(['%s=%.3f' % (a, b) for a, b in zip(metric_names, list(avg))])
init_finetune, variables_to_train = None, None
if FLAGS.finetune:
print_red("finetune, using RGB with vgg pixel means")
init_finetune, variables_to_train = setup_finetune(FLAGS.finetune, ['head'])
global_step = tf.train.create_global_step()
LR = tf.train.exponential_decay(FLAGS.lr, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True)
if FLAGS.adam:
print("Using Adam optimizer, reducing LR by 100x")
optimizer = tf.train.AdamOptimizer(LR/100)
else:
optimizer = tf.train.MomentumOptimizer(learning_rate=LR, momentum=0.9)
print(variables_to_train)
train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step, variables_to_train=variables_to_train)
saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)
stream = create_picpac_stream(FLAGS.db, True)
# load validation db
val_stream = None
if FLAGS.val_db:
val_stream = create_picpac_stream(FLAGS.val_db, False)
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
epoch_steps = FLAGS.epoch_steps
if epoch_steps is None:
epoch_steps = (stream.size() + FLAGS.batch-1) // FLAGS.batch
best = 0
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
if init_finetune:
init_finetune(sess)
if FLAGS.resume:
saver.restore(sess, FLAGS.resume)
global_start_time = time.time()
epoch = 0
step = 0
while epoch < FLAGS.max_epochs:
start_time = time.time()
cnt, metrics_sum = 0, np.array([0] * len(metrics), dtype=np.float32)
progress = tqdm(range(epoch_steps), leave=False)
for _ in progress:
_, images, labels = stream.next()
feed_dict = {X: images, Y: labels, is_training: True}
mm, _ = sess.run([metrics, train_op], feed_dict=feed_dict)
metrics_sum += np.array(mm) * images.shape[0]
cnt += images.shape[0]
metrics_txt = format_metrics(metrics_sum/cnt)
progress.set_description(metrics_txt)
step += 1
pass
stop = time.time()
msg = 'train epoch=%d step=%d ' % (epoch, step)
msg += metrics_txt
msg += ' elapsed=%.3f time=%.3f ' % (stop - global_start_time, stop - start_time)
print_green(msg)
logging.info(msg)
epoch += 1
if (epoch % FLAGS.val_epochs == 0) and val_stream:
lr = sess.run(LR)
# evaluation
Ys, Ps = [], []
cnt, metrics_sum = 0, np.array([0] * len(metrics), dtype=np.float32)
val_stream.reset()
progress = tqdm(val_stream, leave=False)
                # keep the picpac batch metadata: meta.labels feeds the per-image sklearn metrics below
                for meta, images, labels in progress:
feed_dict = {X: images, Y: labels, is_training: False}
p, mm = sess.run([probs, metrics], feed_dict=feed_dict)
metrics_sum += np.array(mm) * images.shape[0]
cnt += images.shape[0]
Ys.extend(list(meta.labels))
Ps.extend(list(p))
metrics_txt = format_metrics(metrics_sum/cnt)
progress.set_description(metrics_txt)
pass
assert cnt == val_stream.size()
avg = metrics_sum / cnt
if avg[0] > best:
best = avg[0]
msg = 'valid epoch=%d step=%d ' % (epoch-1, step)
msg += metrics_txt
if FLAGS.classes == 2:
# display scikit-learn metrics
Ys = np.array(Ys, dtype=np.int32)
Ps = np.array(Ps, dtype=np.float32)
msg += ' sk_acc=%.3f auc=%.3f' % (accuracy_score(Ys, Ps > 0.5), roc_auc_score(Ys, Ps))
pass
msg += ' lr=%.4f best=%.3f' % (lr, best)
print_red(msg)
logging.info(msg)
#log.write('%d\t%s\t%.4f\n' % (epoch, '\t'.join(['%.4f' % x for x in avg]), best))
# model saving
if (epoch % FLAGS.ckpt_epochs == 0) and FLAGS.model:
ckpt_path = '%s/%d' % (FLAGS.model, epoch)
saver.save(sess, ckpt_path)
print('saved to %s.' % ckpt_path)
pass
pass
pass
if __name__ == '__main__':
try:
tf.app.run()
except KeyboardInterrupt:
pass
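# --- Editor's note (illustrative, not part of the original script): a typical
# invocation, assuming hypothetical picpac databases "train.db" and "val.db"
# and an output directory "./model":
#
#   python3 train-slim-fcn.py --db train.db --val_db val.db \
#       --backbone resnet_v2_50 --batch 4 --model ./model --max_epochs 200
#
# Each flag maps onto one of the tf.app.flags definitions near the top of the file.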
|
aaalgo/cls
|
train-slim-fcn.py
|
Python
|
mit
| 13,078
| 0.005811
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 17:52:19 2017
Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
______________________________________________________
"""
from numpy.random import multivariate_normal as npmvn
from numpy import diag
def PartGen(emitT,numPart):
meanPart=[0.,0.,0.,0.,0.,0.]
covPart=diag([emitT[0],emitT[0],emitT[1],emitT[1],emitT[2],emitT[2]])
x,xp,y,yp,z,zp=npmvn(meanPart,covPart,numPart).T
return x,xp,y,yp,z,zp
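# --- Editor's note (illustrative, not part of the original module): drawing 1000
# particles from three hypothetical plane "emittance" values; each returned array
# has length numPart and, e.g., x has standard deviation sqrt(emitT[0]).
if __name__ == "__main__":
    x, xp, y, yp, z, zp = PartGen([1.0e-6, 1.0e-6, 1.0e-6], 1000)
    print(len(x), x.std(), y.std(), z.std())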
|
iABC2XYZ/abc
|
DM_RFGAP_3/PartGen.py
|
Python
|
gpl-3.0
| 516
| 0.05814
|
"""This *was* the parser for the current HTML format on parl.gc.ca.
But now we have XML. See parl_document.py.
This module is organized like so:
__init__.py - utility functions, simple parse interface
common.py - infrastructure used in the parsers, i.e. regexes
current.py - parser for the Hansard format used from 2006 to the present
old.py - (fairly crufty) parser for the format used from 1994 to 2006
"""
from parliament.imports.hans_old.common import *
import logging
logger = logging.getLogger(__name__)
class HansardParser2009(HansardParser):
def __init__(self, hansard, html):
for regex in STARTUP_RE_2009:
html = re.sub(regex[0], regex[1], html)
super(HansardParser2009, self).__init__(hansard, html)
for x in self.soup.findAll('a', 'deleteMe'):
x.findParent('div').extract()
def process_related_link(self, tag, string, current_politician=None):
#print "PROCESSING RELATED for %s" % string
resid = re.search(r'ResourceID=(\d+)', tag['href'])
restype = re.search(r'ResourceType=(Document|Affiliation)', tag['href'])
        if not (resid and restype):  # need both the resource id and the resource type to build a link
return string
resid, restype = int(resid.group(1)), restype.group(1)
if restype == 'Document':
try:
bill = Bill.objects.get_by_legisinfo_id(resid)
except Bill.DoesNotExist:
match = re.search(r'\b[CS]\-\d+[A-E]?\b', string)
if not match:
logger.error("Invalid bill link %s" % string)
return string
bill = Bill.objects.create_temporary_bill(legisinfo_id=resid,
number=match.group(0), session=self.hansard.session)
except Exception, e:
print "Related bill search failed for callback %s" % resid
print repr(e)
return string
return u'<bill id="%d" name="%s">%s</bill>' % (bill.id, escape(bill.name), string)
elif restype == 'Affiliation':
try:
pol = Politician.objects.getByParlID(resid)
except Politician.DoesNotExist:
print "Related politician search failed for callback %s" % resid
if getattr(settings, 'PARLIAMENT_LABEL_FAILED_CALLBACK', False):
# FIXME migrate away from internalxref?
InternalXref.objects.get_or_create(schema='pol_parlid', int_value=resid, target_id=-1)
return string
if pol == current_politician:
return string # When someone mentions her riding, don't link back to her
return u'<pol id="%d" name="%s">%s</pol>' % (pol.id, escape(pol.name), string)
def get_text(self, cursor):
text = u''
for string in cursor.findAll(text=parsetools.r_hasText):
if string.parent.name == 'a' and string.parent['class'] == 'WebOption':
text += self.process_related_link(string.parent, string, self.t['politician'])
else:
text += unicode(string)
return text
def parse(self):
super(HansardParser2009, self).parse()
# Initialize variables
t = ParseTracker()
self.t = t
member_refs = {}
# Get the date
c = self.soup.find(text='OFFICIAL REPORT (HANSARD)').findNext('h2')
self.date = datetime.datetime.strptime(c.string.strip(), "%A, %B %d, %Y").date()
self.hansard.date = self.date
self.hansard.save()
c = c.findNext(text=r_housemet)
match = re.search(r_housemet, c.string)
t['timestamp'] = self.houseTime(match.group(1), match.group(2))
t.setNext('timestamp', t['timestamp'])
# Move the pointer to the start
c = c.next
# And start the big loop
while c is not None:
# It's a string
if not hasattr(c, 'name'):
pass
# Heading
elif c.name == 'h2':
c = c.next
if not parsetools.isString(c): raise ParseException("Expecting string right after h2")
t.setNext('heading', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip())))
# Topic
elif c.name == 'h3':
top = c.find(text=r_letter)
#if not parsetools.isString(c):
# check if it's an empty header
# if c.parent.find(text=r_letter):
# raise ParseException("Expecting string right after h3")
if top is not None:
c = top
t['topic_set'] = True
t.setNext('topic', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip())))
elif c.name == 'h4':
if c.string == 'APPENDIX':
self.saveStatement(t)
print "Appendix reached -- we're done!"
break
# Timestamp
elif c.name == 'a' and c.has_key('name') and c['name'].startswith('T'):
match = re.search(r'^T(\d\d)(\d\d)$', c['name'])
if match:
t.setNext('timestamp', parsetools.time_to_datetime(
hour=int(match.group(1)),
minute=int(match.group(2)),
date=self.date))
else:
raise ParseException("Couldn't match time %s" % c.attrs['name'])
elif c.name == 'b' and c.string:
# Something to do with written answers
match = r_honorific.search(c.string)
if match:
# It's a politician asking or answering a question
# We don't get a proper link here, so this has to be a name match
polname = re.sub(r'\(.+\)', '', match.group(2)).strip()
self.saveStatement(t)
t['member_title'] = c.string.strip()
t['written_question'] = True
try:
pol = Politician.objects.get_by_name(polname, session=self.hansard.session)
t['politician'] = pol
t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date)
except Politician.DoesNotExist:
print "WARNING: No name match for %s" % polname
except Politician.MultipleObjectsReturned:
print "WARNING: Multiple pols for %s" % polname
else:
if not c.string.startswith('Question'):
print "WARNING: Unexplained boldness: %s" % c.string
# div -- the biggie
elif c.name == 'div':
origdiv = c
if c.find('b'):
# We think it's a new speaker
# Save the current buffer
self.saveStatement(t)
c = c.find('b')
if c.find('a'):
# There's a link...
c = c.find('a')
match = re.search(r'ResourceType=Affiliation&ResourceID=(\d+)', c['href'])
if match and c.find(text=r_letter):
parlwebid = int(match.group(1))
# We have the parl ID. First, see if we already know this ID.
pol = Politician.objects.getByParlID(parlwebid, lookOnline=False)
if pol is None:
# We don't. Try to do a quick name match first (if flags say so)
if not GET_PARLID_ONLINE:
who = c.next.string
match = re.search(r_honorific, who)
if match:
polname = re.sub(r'\(.+\)', '', match.group(2)).strip()
try:
#print "Looking for %s..." % polname,
pol = Politician.objects.get_by_name(polname, session=self.hansard.session)
#print "found."
except Politician.DoesNotExist:
pass
except Politician.MultipleObjectsReturned:
pass
if pol is None:
# Still no match. Go online...
try:
pol = Politician.objects.getByParlID(parlwebid, session=self.hansard.session)
except Politician.DoesNotExist:
print "WARNING: Couldn't find politician for ID %d" % parlwebid
if pol is not None:
t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date)
t['politician'] = pol
c = c.next
if not parsetools.isString(c): raise Exception("Expecting string in b for member name")
t['member_title'] = c.strip()
#print c
if t['member_title'].endswith(':'): # Remove colon in e.g. Some hon. members:
t['member_title'] = t['member_title'][:-1]
# Sometimes we don't get a link for short statements -- see if we can identify by backreference
if t['member']:
member_refs[t['member_title']] = t['member']
# Also save a backref w/o position/riding
member_refs[re.sub(r'\s*\(.+\)\s*', '', t['member_title'])] = t['member']
elif t['member_title'] in member_refs:
t['member'] = member_refs[t['member_title']]
t['politician'] = t['member'].politician
c.findParent('b').extract() # We've got the title, now get the rest of the paragraph
c = origdiv
t.addText(self.get_text(c))
else:
# There should be text in here
if c.find('div'):
if c.find('div', 'Footer'):
# We're done!
self.saveStatement(t)
print "Footer div reached -- done!"
break
raise Exception("I wasn't expecting another div in here")
txt = self.get_text(c).strip()
if r_proceedings.search(txt):
self.saveStatement(t)
self.saveProceedingsStatement(txt, t)
else:
t.addText(txt, blockquote=bool(c.find('small')))
else:
#print c.name
if c.name == 'b':
print "B: ",
print c
#if c.name == 'p':
# print "P: ",
# print c
c = c.next
return self.statements
|
twhyte/openparliament
|
parliament/imports/hans_old/current.py
|
Python
|
agpl-3.0
| 11,722
| 0.006398
|
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from google.cloud import bigquery
from google.cloud.bigquery.job import ExtractJobConfig, DestinationFormat
from google.api_core import exceptions
from kfp_component.google.bigquery import query
CREATE_JOB_MODULE = 'kfp_component.google.bigquery._query'
@mock.patch(CREATE_JOB_MODULE + '.display.display')
@mock.patch(CREATE_JOB_MODULE + '.gcp_common.dump_file')
@mock.patch(CREATE_JOB_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_JOB_MODULE + '.bigquery.Client')
class TestQuery(unittest.TestCase):
def test_query_succeed(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.side_effect = exceptions.NotFound('not found')
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1',
output_gcs_path='gs://output/path')
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_called()
expected_job_config = bigquery.QueryJobConfig()
expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
expected_job_config.destination = mock_dataset.table('query_ctx1')
mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY,
job_id = 'query_ctx1')
actual_job_config = mock_client().query.call_args_list[0][0][1]
self.assertDictEqual(
expected_job_config.to_api_repr(),
actual_job_config.to_api_repr()
)
extract = mock_client().extract_table.call_args_list[0]
self.assertEqual(extract[0], (mock_dataset.table('query_ctx1'), 'gs://output/path',))
self.assertEqual(extract[1]["job_config"].destination_format, "CSV",)
def test_query_no_output_path(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.return_value = bigquery.Dataset(mock_dataset)
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1', 'table-1')
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_not_called()
mock_client().extract_table.assert_not_called()
expected_job_config = bigquery.QueryJobConfig()
expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
expected_job_config.destination = mock_dataset.table('table-1')
mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY,
job_id = 'query_ctx1')
actual_job_config = mock_client().query.call_args_list[0][0][1]
self.assertDictEqual(
expected_job_config.to_api_repr(),
actual_job_config.to_api_repr()
)
def test_query_output_json_format(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.side_effect = exceptions.NotFound('not found')
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1',
output_gcs_path='gs://output/path',
output_destination_format="NEWLINE_DELIMITED_JSON")
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_called()
extract = mock_client().extract_table.call_args_list[0]
self.assertEqual(extract[0], (mock_dataset.table('query_ctx1'), 'gs://output/path',))
self.assertEqual(extract[1]["job_config"].destination_format, "NEWLINE_DELIMITED_JSON",)
|
kubeflow/pipelines
|
components/gcp/container/component_sdk/python/tests/google/bigquery/test__query.py
|
Python
|
apache-2.0
| 5,793
| 0.005351
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible import errors
from ansible import utils
import shlex
import re
import ast
class InventoryParser(object):
"""
Host inventory for ansible.
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
with open(filename) as fh:
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
self._parse()
def _parse(self):
self._parse_base_groups()
self._parse_group_children()
self._add_allgroup_children()
self._parse_group_variables()
return self.groups
@staticmethod
def _parse_value(v):
if "#" not in v:
try:
return ast.literal_eval(v)
# Using explicit exceptions.
            # Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return v
# [webservers]
# alpha
# beta:2345
# gamma sudo=True user=root
# delta asdf=jkl favcolor=red
def _add_allgroup_children(self):
for group in self.groups.values():
if group.depth == 0 and group.name != 'all':
self.groups['all'].add_child_group(group)
def _parse_base_groups(self):
# FIXME: refactor
ungrouped = Group(name='ungrouped')
all = Group(name='all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
for line in self.lines:
line = utils.before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
active_group_name = active_group_name.rsplit(":", 1)[0]
if active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
active_group_name = None
elif active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
elif line.startswith(";") or line == '':
pass
elif active_group_name:
tokens = shlex.split(line)
if len(tokens) == 0:
continue
hostname = tokens[0]
port = C.DEFAULT_REMOTE_PORT
# Three cases to check:
                # 0. A hostname that contains a range pseudo-code and a port
# 1. A hostname that contains just a port
if hostname.count(":") > 1:
# Possible an IPv6 address, or maybe a host line with multiple ranges
# IPv6 with Port XXX:XXX::XXX.port
# FQDN foo.example.com
if hostname.count(".") == 1:
(hostname, port) = hostname.rsplit(".", 1)
elif ("[" in hostname and
"]" in hostname and
":" in hostname and
(hostname.rindex("]") < hostname.rindex(":")) or
("]" not in hostname and ":" in hostname)):
(hostname, port) = hostname.rsplit(":", 1)
hostnames = []
if detect_range(hostname):
hostnames = expand_hostname_range(hostname)
else:
hostnames = [hostname]
for hn in hostnames:
host = None
if hn in self.hosts:
host = self.hosts[hn]
else:
host = Host(name=hn, port=port)
self.hosts[hn] = host
if len(tokens) > 1:
for t in tokens[1:]:
if t.startswith('#'):
break
try:
(k,v) = t.split("=", 1)
except ValueError, e:
raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
# atlanta
# raleigh
def _parse_group_children(self):
group = None
for line in self.lines:
line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
line = line.replace("[","").replace(":children]","")
group = self.groups.get(line, None)
if group is None:
group = self.groups[line] = Group(name=line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
raise errors.AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
# [webservers:vars]
# http_port=1234
# maxRequestsPerChild=200
def _parse_group_variables(self):
group = None
for line in self.lines:
line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
raise errors.AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif line == '':
pass
elif group:
if "=" not in line:
raise errors.AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
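# --- Editor's note (illustrative, not part of the original module): the parser is
# normally driven by ansible's Inventory class, but it can be exercised directly
# against an INI-style hosts file (file name and contents below are hypothetical):
#
#   [webservers]
#   alpha http_port=80
#   web[01:03].example.com
#
#   [southeast:children]
#   webservers
#
#   [webservers:vars]
#   max_requests=200
#
# parser = InventoryParser(filename="hosts.ini")
# print parser.groups.keys()   # e.g. ['all', 'ungrouped', 'webservers', 'southeast']
# print parser.hosts.keys()    # expanded host names, e.g. web01.example.com ...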
|
wulczer/ansible
|
lib/ansible/inventory/ini.py
|
Python
|
gpl-3.0
| 7,628
| 0.003146
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import sqlalchemy as sa
from sqlalchemy import orm
_engine = None
_session_maker = None
DB_CONNECTION = "sqlite:////tmp/restalchemy-%s.db" % uuid.uuid4()
def get_engine():
global _engine
if _engine is None:
_engine = sa.create_engine(DB_CONNECTION, echo=True)
return _engine
def get_session():
return orm.sessionmaker(bind=get_engine())
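# --- Editor's note (illustrative, not part of the original module): get_session()
# returns a sessionmaker factory bound to the shared engine, not a live session,
# so callers are expected to instantiate it themselves:
#
#   Session = get_session()              # sessionmaker bound to the SQLite engine
#   session = Session()                  # actual SQLAlchemy session
#   session.execute(sa.text("SELECT 1"))
#   session.close()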
|
phantomii/restalchemy
|
restalchemy/tests/functional/restapi/sa_based/microservice/db.py
|
Python
|
apache-2.0
| 1,082
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <yannick.gouin@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
import time
from osv import fields, osv
from tools.translate import _
import tools
from tools import ustr
class gap_analysis(osv.Model):
_inherit = "gap_analysis"
_name = "gap_analysis"
def generate_project(self, cr, uid, ids, context=None):
project_pool = self.pool.get('project.project')
task_pool = self.pool.get('project.task')
for gap in self.browse(cr, uid, ids, context=context):
partner_id = gap.partner_id and gap.partner_id.id or False
notes = gap.note or ''
project_vals = {
'name': gap.name,
'description': notes,
'user_id': gap.user_id.id,
'partner_id': partner_id,
'gap_analysis_id': gap.id,
}
project_id = project_pool.create(cr, uid, project_vals, context=context)
for gap_line in gap.gap_lines:
if gap_line.to_project and gap_line.keep:
time4dev = 0
time4tech = 0
time4fct = 0
time4test = gap_line.testing or 0
if gap_line.effort:
if gap_line.effort.unknown:
time4dev = gap_line.duration_wk
else:
time4dev = gap_line.effort.duration
for workload in gap_line.workloads:
if workload.type.category == "Technical Analysis":
time4tech += workload.duration
else:
time4fct += workload.duration
# Create Tasks
if time4dev > 0 or time4tech > 0 or time4fct > 0 or time4test > 0:
maintask_vals = {
'name': gap_line.functionality.name[0:100],
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_functional and gap.user_functional.id or False,
'gap_line_id': gap_line.id,
'to_report': True,
'org_planned_hours': 0,
'planned_hours': 0,
'remaining_hours': 0,
}
maintask_id = task_pool.create(cr, uid, maintask_vals, context=context)
maintask_id = [int(maintask_id)]
if time4test > 0:
task_vals4test = {
'name': gap_line.functionality.name[0:100] + " [TEST]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4test,
'planned_hours': time4test,
'remaining_hours': time4test,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_test and gap.user_test.id or False,
'gap_line_id': gap_line.id,
}
task_pool.create(cr, uid, task_vals4test, context=context)
if time4dev > 0:
task_vals4dev = {
'name': gap_line.functionality.name[0:100] + " [DEV]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4dev,
'planned_hours': time4dev,
'remaining_hours': time4dev,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_dev and gap.user_dev.id or False,
'gap_line_id': gap_line.id,
}
task_pool.create(cr, uid, task_vals4dev, context=context)
if time4tech > 0:
task_vals4tech = {
'name': gap_line.functionality.name[0:100] + " [TECH]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4tech,
'planned_hours': time4tech,
'remaining_hours': time4tech,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_technical and gap.user_technical.id or False,
'gap_line_id': gap_line.id,
}
task_pool.create(cr, uid, task_vals4tech, context=context)
if time4fct > 0:
task_vals4fct = {
'name': gap_line.functionality.name[0:100] + " [FUNC]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4fct,
'planned_hours': time4fct,
'remaining_hours': time4fct,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.functionality.category and gap_line.functionality.category.id or False,
'user_id': gap.user_functional and gap.user_functional.id or False,
'gap_line_id': gap_line.id,
}
task_pool.create(cr, uid, task_vals4fct, context=context)
if project_id:
return {
'type': 'ir.actions.act_window',
'name':"Generated Project",
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'project.project',
'res_id': project_id,
'context': context
}
return True
class gap_analysis_line(osv.Model):
_name = "gap_analysis.line"
_inherit = "gap_analysis.line"
_columns = {
'to_project': fields.boolean('Add to project ?', help='Specify whether this functionality must create a task or not when you generate a project.'),
}
_defaults = {
'to_project': True,
}
class openerp_module(osv.Model):
_name = "openerp_module"
_columns = {
'name': fields.char('Name', size=128, required=True),
'version': fields.char('Version', size=128),
'note': fields.text('Note'),
}
class project(osv.Model):
_inherit = "project.project"
_name = "project.project"
_columns = {
'gap_analysis_id': fields.many2one('gap_analysis', 'Gap Analysis'),
}
class project_task(osv.Model):
_inherit = "project.task"
_name = "project.task"
def _get_parent_category(self, cr, uid, ids, fields, args, context=None):
context = context or {}
res = {}
for task in self.browse(cr, uid, ids):
res[task.id] = task.gap_category_id and task.gap_category_id.parent_id.id or False
return res
def _task_to_update_after_category_change(self, cr, uid, ids, fields=None, arg=None, context=None):
if type(ids) != type([]):
ids = [ids]
return self.pool.get('project.task').search(cr, uid, [('gap_category_id', 'in', ids)]) or []
def _get_child_tasks(self, cr, uid, ids, context=None):
if type(ids) != type([]):
ids = [ids]
cr.execute("SELECT DISTINCT parent_id FROM project_task_parent_rel WHERE task_id in %s", (tuple(ids),))
task_ids = filter(None, map(lambda x:x[0], cr.fetchall())) or []
return task_ids
def _get_child_hours(self, cr, uid, ids, field_names, args, context=None):
result = {}
for task in self.browse(cr, uid, ids, context=context):
res = {}
child_org_planned_hours = 0.0
child_planned_hours = 0.0
child_remaining_hours = 0.0
for child in task.child_ids:
child_org_planned_hours += child.org_planned_hours
child_planned_hours += child.planned_hours
child_remaining_hours += child.remaining_hours
res['child_org_planned_hours'] = child_org_planned_hours
res['child_planned_hours'] = child_planned_hours
res['child_remaining_hours'] = child_remaining_hours
result[task.id] = res
return result
# def onchange_planned(self, cr, uid, ids, planned = 0.0, effective = 0.0):
# return {'value':{'remaining_hours': planned - effective, 'org_planned_hours':planned}}
_columns = {
'child_org_planned_hours': fields.function(_get_child_hours, string='Child Original Planned Hours', multi='child_hours', help="Computed using the sum of the child tasks Original planned hours.",
store = {
'project.task': (_get_child_tasks, ['org_planned_hours','planned_hours'], 10),
}),
'child_planned_hours': fields.function(_get_child_hours, string='Child Planned Hours', multi='child_hours', help="Computed using the sum of the child tasks planned hours.",
store = {
'project.task': (_get_child_tasks, ['planned_hours','remaining_hours'], 10),
}),
'child_remaining_hours': fields.function(_get_child_hours, string='Child Remaining Hours', multi='child_hours', help="Computed using the sum of the child tasks work done.",
store = {
'project.task': (_get_child_tasks, ['planned_hours','remaining_hours'], 10),
}),
'module_id': fields.many2one('openerp_module', 'Module', select=True),
'gap_category_id': fields.many2one('gap_analysis.functionality.category','Category', select=True),
'parent_category': fields.function(_get_parent_category, method=True, type='many2one', obj='gap_analysis.functionality.category', string='Parent Category', store={'project.task': (lambda self, cr, uid, ids, context: ids, ['gap_category_id'], 10), 'gap_analysis.functionality.category': (_task_to_update_after_category_change, ['parent_id'], 10),}),
'gap_line_id': fields.many2one('gap_analysis.line', 'Gap Analysis Line', select=True),
'code_gap': fields.char('Code in Gap', size=6),
'to_report': fields.boolean('Report to customer'),
'org_planned_hours': fields.float('Original Planned Hours', help='Original estimated time to do the task, usually set by the project manager when the task is in draft state.'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
udayinfy/openerp-7.0
|
gap_analysis_project/gap_analysis_project.py
|
Python
|
agpl-3.0
| 13,460
| 0.008915
|
#!/usr/bin/env python
"""Contains the Data Model for a cool Resource.
"""
__author__ = "Sanjay Joshi"
__copyright__ = "IBM Copyright 2017"
__credits__ = ["Sanjay Joshi"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Sanjay Joshi"
__email__ = "joshisa@us.ibm.com"
__status__ = "Prototype"
schema = {
'url': 'corpora/ada_diabetes/concepts',
'schema': {
'cloudhost': {
'type': 'string',
'default': 'Powered by IBM Bluemix and Python Eve'
},
'base16': {
'type': 'string',
'default': '######'
},
'hex': {
'type': 'string',
'default': '##-##-##'
},
'organization': {
'type': 'string',
'default': 'Doh!MissingOrg'
}
},
'allow_unknown': True
}
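# --- Editor's note (illustrative, not part of the original module): in a Python Eve
# application this resource definition is typically registered in the settings
# DOMAIN mapping (the import path below is an assumption):
#
#   from mistub.models import concepts
#   DOMAIN = {"concepts": concepts.schema}
#
# Eve then serves the resource at the 'url' declared above
# ("corpora/ada_diabetes/concepts").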
|
joshisa/mistub
|
mistub/models/concepts.py
|
Python
|
apache-2.0
| 833
| 0
|
import astra
def gpu_fp(pg, vg, v):
    # GPU forward projection: volume v on grid vg -> sinogram with projection geometry pg.
v_id = astra.data2d.create('-vol', vg, v)
rt_id = astra.data2d.create('-sino', pg)
fp_cfg = astra.astra_dict('FP_CUDA')
fp_cfg['VolumeDataId'] = v_id
fp_cfg['ProjectionDataId'] = rt_id
fp_id = astra.algorithm.create(fp_cfg)
astra.algorithm.run(fp_id)
out = astra.data2d.get(rt_id)
astra.algorithm.delete(fp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_bp(pg, vg, rt, supersampling=1):
    # GPU unfiltered back-projection of sinogram rt onto volume grid vg.
v_id = astra.data2d.create('-vol', vg)
rt_id = astra.data2d.create('-sino', pg, data=rt)
bp_cfg = astra.astra_dict('BP_CUDA')
bp_cfg['ReconstructionDataId'] = v_id
bp_cfg['ProjectionDataId'] = rt_id
bp_id = astra.algorithm.create(bp_cfg)
astra.algorithm.run(bp_id)
out = astra.data2d.get(v_id)
astra.algorithm.delete(bp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_fbp(pg, vg, rt):
    # GPU filtered back-projection (FBP_CUDA) reconstruction of sinogram rt.
rt_id = astra.data2d.create('-sino', pg, data=rt)
v_id = astra.data2d.create('-vol', vg)
fbp_cfg = astra.astra_dict('FBP_CUDA')
fbp_cfg['ReconstructionDataId'] = v_id
fbp_cfg['ProjectionDataId'] = rt_id
#fbp_cfg['FilterType'] = 'none'
fbp_id = astra.algorithm.create(fbp_cfg)
astra.algorithm.run(fbp_id, 100)
out = astra.data2d.get(v_id)
astra.algorithm.delete(fbp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_sirt(pg, vg, rt, n_iters=100):
    # GPU SIRT iterative reconstruction of sinogram rt, running n_iters iterations.
rt_id = astra.data2d.create('-sino', pg, data=rt)
v_id = astra.data2d.create('-vol', vg)
sirt_cfg = astra.astra_dict('SIRT_CUDA')
sirt_cfg['ReconstructionDataId'] = v_id
sirt_cfg['ProjectionDataId'] = rt_id
#sirt_cfg['option'] = {}
#sirt_cfg['option']['MinConstraint'] = 0
sirt_id = astra.algorithm.create(sirt_cfg)
astra.algorithm.run(sirt_id, n_iters)
out = astra.data2d.get(v_id)
astra.algorithm.delete(sirt_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
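# --- Editor's note: an illustrative sketch, not part of the original module. It
# shows how the helpers above fit together for a parallel-beam round trip on a
# hypothetical 256x256 phantom; the *_CUDA algorithms require an NVIDIA GPU.
def _example_round_trip(n=256, n_angles=180):
    import numpy as np
    vg = astra.create_vol_geom(n, n)                          # reconstruction grid
    angles = np.linspace(0, np.pi, n_angles, endpoint=False)
    pg = astra.create_proj_geom('parallel', 1.0, n, angles)   # parallel-beam sinogram geometry
    phantom = np.zeros((n, n), dtype=np.float32)
    phantom[n // 4:3 * n // 4, n // 4:3 * n // 4] = 1.0       # simple square phantom
    sino = gpu_fp(pg, vg, phantom)    # forward projection
    rec = gpu_fbp(pg, vg, sino)       # filtered back-projection reconstruction
    return sino, rec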
|
buzmakov/tomography_scripts
|
tomo/yaivan/dispersion/alg.py
|
Python
|
mit
| 1,930
| 0.031088
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locates imports that violate cirq's submodule dependencies.
Specifically, this test treats the modules as a tree structure where `cirq` is
the root, each submodule is a node and each python file is a leaf node. While
a node (module) is in the process of being imported, it is not allowed to import
nodes for the first time other than its children. If a module was imported
earlier by `cirq.__init__`, it may be imported. This is currently only enforced
for the first level of submodules under cirq, not sub-submodules.
Usage:
dev_tools/import_test.py [-h] [--time] [--others]
optional arguments:
-h, --help show this help message and exit
--time print a report of the modules that took the longest to import
--others also track packages other than cirq and print when they are
imported
"""
from typing import List
import argparse
import collections
import os.path
import subprocess
import sys
import time
parser = argparse.ArgumentParser(
description="Locates imports that violate cirq's submodule dependencies."
)
parser.add_argument(
'--time',
action='store_true',
help='print a report of the modules that took the longest to import',
)
parser.add_argument(
'--others',
action='store_true',
help='also track packages other than cirq and print when they are imported',
)
def verify_import_tree(depth: int = 1, track_others: bool = False, timeit: bool = False) -> bool:
"""Locates imports that violate cirq's submodule dependencies by
instrumenting python import machinery then importing cirq.
Logs when each submodule (up to the given depth) begins and ends executing
during import and prints an error when any import within a submodule causes
a neighboring module to be imported for the first time. The indent
pattern of the printed output will match the module tree structure if the
imports are all valid. Otherwise an error is printed indicating the
location of the invalid import.
Output for valid imports:
Start cirq
...
Start cirq.study
End cirq.study
Start cirq.circuits
End cirq.circuits
Start cirq.schedules
End cirq.schedules
...
End cirq
Output for an invalid import in `cirq/circuits/circuit.py`:
Start cirq
...
Start cirq.study
End cirq.study
Start cirq.circuits
ERROR: cirq.circuits.circuit imported cirq.vis
Start cirq.vis
End cirq.vis
... # Possibly more errors caused by the first.
End cirq.circuits
Start cirq.schedules
End cirq.schedules
...
End cirq
Invalid import: cirq.circuits.circuit imported cirq.vis
Args:
depth: How deep in the module tree to verify. If depth is 1, verifies
that submodules of cirq like cirq.ops doesn't import cirq.circuit.
If depth is 2, verifies that submodules and sub-submodules like
cirq.ops.raw_types doesn't import cirq.ops.common_gates or
cirq.circuit.
track_others: If True, logs where cirq first imports an external package
in addition to logging when cirq modules are imported.
timeit: Measure the import time of cirq and each submodule and print a
report of the worst. Includes times for external packages used by
cirq if `track_others` is True.
Returns:
True is no import issues, False otherwise.
"""
fail_list = []
start_times = {}
load_times = {}
current_path: List[str] = []
currently_running_paths: List[List[str]] = [[]]
import_depth = 0
indent = ' ' * 2
def wrap_module(module):
nonlocal import_depth
start_times[module.__name__] = time.perf_counter()
path = module.__name__.split('.')
if path[0] != 'cirq':
if len(path) == 1:
print(f'{indent * import_depth}Other {module.__name__}')
return module
currently_running_paths.append(path)
if len(path) == len(current_path) + 1 and path[:-1] == current_path:
# Move down in tree
current_path.append(path[-1])
else:
# Jump somewhere else in the tree
handle_error(currently_running_paths[-2], path)
current_path[:] = path
if len(path) <= depth + 1:
print(f'{indent * import_depth}Start {module.__name__}')
import_depth += 1
return module
def after_exec(module):
nonlocal import_depth
load_times[module.__name__] = time.perf_counter() - start_times[module.__name__]
path = module.__name__.split('.')
if path[0] != 'cirq':
return
assert path == currently_running_paths.pop(), 'Unexpected import state'
if len(path) <= depth + 1:
import_depth -= 1
print(f'{indent * import_depth}End {module.__name__}')
if path == current_path:
# No submodules were here
current_path.pop()
elif len(path) == len(current_path) - 1 and path == current_path[:-1]:
# Move up in tree
current_path.pop()
else:
# Jump somewhere else in the tree
current_path[:] = path[:-1]
def handle_error(import_from, import_to):
if import_from[: depth + 1] != import_to[: depth + 1]:
msg = f"{'.'.join(import_from)} imported {'.'.join(import_to)}"
fail_list.append(msg)
print(f'ERROR: {msg}')
# Import wrap_module_executions without importing cirq
orig_path = list(sys.path)
project_dir = os.path.dirname(os.path.dirname(__file__))
cirq_dir = os.path.join(project_dir, 'cirq')
sys.path.append(cirq_dir) # Put cirq/_import.py in the path.
from cirq._import import wrap_module_executions # type: ignore
sys.path[:] = orig_path # Restore the path.
sys.path.append(project_dir) # Ensure the cirq package is in the path.
# note that with the cirq.google injection we do change the metapath
with wrap_module_executions('' if track_others else 'cirq', wrap_module, after_exec, False):
# Import cirq with instrumentation
import cirq # pylint: disable=unused-import
sys.path[:] = orig_path # Restore the path.
if fail_list:
print()
# Only print the first because later errors are often caused by the
# first and not as helpful.
print(f'Invalid import: {fail_list[0]}')
if timeit:
worst_loads = collections.Counter(load_times).most_common(15)
print()
print('Worst load times:')
for name, dt in worst_loads:
print(f'{dt:.3f} {name}')
return not fail_list
FAIL_EXIT_CODE = 65
def test_no_circular_imports():
"""Runs the test in a subprocess because cirq has already been imported
before in an earlier test but this test needs to control the import process.
"""
status = subprocess.call([sys.executable, __file__])
if status == FAIL_EXIT_CODE:
# coverage: ignore
raise Exception('Invalid import. See captured output for details.')
elif status != 0:
# coverage: ignore
raise RuntimeError('Error in subprocess')
if __name__ == '__main__':
args = parser.parse_args()
success = verify_import_tree(track_others=args.others, timeit=args.time)
sys.exit(0 if success else FAIL_EXIT_CODE)
|
quantumlib/Cirq
|
dev_tools/import_test.py
|
Python
|
apache-2.0
| 8,158
| 0.000981
|
from flask import jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Table, Column, Integer, ForeignKey
from src.webservice.base import Base
from src.webservice._action import Action
db = SQLAlchemy()
Base.query = db.session.query_property()
class Input(Base):
__tablename__ = 'tbl_InputPin'
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer())
name = db.Column(db.String(50))
parent_id = db.Column(db.Integer, db.ForeignKey('tbl_Arduino.id'))
parent = db.relationship('Device')
time_between_clicks = db.Column(db.Float(50))
actions = db.relationship("Action", secondary="tbl_InputPin_Action")
@staticmethod
def get_all_inputs():
inputs = Input.query.outerjoin(Input.actions).all()
output = []
for input in inputs:
actions = []
actions_id = []
for action in input.actions:
actions.append(action.name)
actions_id.append(action.id)
input_data = {'id': input.id, 'name': input.name, 'device_name': input.parent.name, 'actions_id': actions_id,
'number': input.number, 'time_between_clicks': input.time_between_clicks, 'actions': actions}
output.append(input_data)
db.session.commit()
return jsonify({'response': output})
@staticmethod
def update_input(request):
data = request.get_json()
input = db.session.query(Input).filter_by(id=data['id']).first()
if 'name' in data:
input.name = data['name']
if 'time_between_clicks' in data:
input.time_between_clicks = data['time_between_clicks']
        if 'actions_id' in data:
actions = Action.get_actions(data['actions_id'])
input.actions = actions
db.session.commit()
        return jsonify({'result': 'Input has been changed'})
|
deklungel/iRulez
|
src/webservice/_inputPin.py
|
Python
|
mit
| 1,909
| 0.001572
|
from distutils.core import setup
setup(
name='Chroma',
version='0.2.0',
author='Seena Burns',
author_email='hello@seenaburns.com',
url='https://github.com/seenaburns/Chroma',
license=open('LICENSE.txt').read(),
description='Color handling made simple.',
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
packages=['chroma'],
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
),
)
|
seenaburns/Chroma
|
setup.py
|
Python
|
bsd-3-clause
| 768
| 0.001302
|
"""
Python wrappers for Orthogonal Distance Regression (ODRPACK).
Classes
=======
Data -- stores the data and weights to fit against
RealData -- stores data with standard deviations and covariance matrices
Model -- stores the model and its related information
Output -- stores all of the output from an ODR run
ODR -- collects all data and runs the fitting routine
Exceptions
==========
odr_error -- error sometimes raised inside odr() and can be raised in the
fitting functions to tell ODRPACK to halt the procedure
odr_stop -- error to raise in fitting functions to tell ODRPACK that the data or
parameters given are invalid
Use
===
Basic use:
1) Define the function you want to fit against.
::
def f(B, x):
''' Linear function y = m*x + b '''
return B[0]*x + B[1]
# B is a vector of the parameters.
# x is an array of the current x values.
# x is same format as the x passed to Data or RealData.
# Return an array in the same format as y passed to Data or RealData.
2) Create a Model.
::
linear = Model(f)
3) Create a Data or RealData instance.
::
mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
or
::
mydata = RealData(x, y, sx=sx, sy=sy)
4) Instantiate ODR with your data, model and initial parameter estimate.
::
myodr = ODR(mydata, linear, beta0=[1., 2.])
5) Run the fit.
::
myoutput = myodr.run()
6) Examine output.
::
myoutput.pprint()
Read the docstrings and the accompanying tests for more advanced usage.
Notes
=====
* Array formats -- FORTRAN stores its arrays in memory column first, i.e. an
array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
efficiency and convenience, the input and output arrays of the fitting
function (and its Jacobians) are passed to FORTRAN without transposition.
Therefore, where the ODRPACK documentation says that the X array is of shape
(N, M), it will be passed to the Python function as an array of shape (M, N).
If M==1, the one-dimensional case, then nothing matters; if M>1, then your
Python functions will be dealing with arrays that are indexed in reverse of
the ODRPACK documentation. No real biggie, but watch out for your indexing of
the Jacobians: the i,j'th elements (@f_i/@x_j) evaluated at the n'th
observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
you can always use the transpose() function from scipy explicitly.
* Examples -- See the accompanying file test/test.py for examples of how to set
up fits of your own. Some are taken from the User's Guide; some are from
other sources.
* Models -- Some common models are instantiated in the accompanying module
models.py . Contributions are welcome.
Credits
=======
* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
Robert Kern
robert.kern@gmail.com
"""
import numpy
from scipy.odr import __odrpack
__all__ = ['odr', 'odr_error', 'odr_stop', 'Data', 'RealData', 'Model',
'Output', 'ODR']
odr = __odrpack.odr
odr_error = __odrpack.odr_error
odr_stop = __odrpack.odr_stop
def _conv(obj, dtype=None):
""" Convert an object to the preferred form for input to the odr routine.
"""
if obj is None:
return obj
else:
if dtype is None:
obj = numpy.asarray(obj)
else:
obj = numpy.asarray(obj, dtype)
if obj.shape == ():
# Scalar.
return obj.dtype.type(obj)
else:
return obj
def _report_error(info):
""" Interprets the return code of the odr routine.
Parameters
----------
info : int
The return code of the odr routine.
Returns
-------
problems : list(str)
A list of messages about why the odr() routine stopped.
"""
stopreason = ('Blank',
'Sum of squares convergence',
'Parameter convergence',
'Both sum of squares and parameter convergence',
'Iteration limit reached')[info % 5]
if info >= 5:
# questionable results or fatal error
I = (info/10000 % 10,
info/1000 % 10,
info/100 % 10,
info/10 % 10,
info % 10)
problems = []
if I[0] == 0:
if I[1] != 0:
problems.append('Derivatives possibly not correct')
if I[2] != 0:
problems.append('Error occurred in callback')
if I[3] != 0:
problems.append('Problem is not full rank at solution')
problems.append(stopreason)
elif I[0] == 1:
if I[1] != 0:
problems.append('N < 1')
if I[2] != 0:
problems.append('M < 1')
if I[3] != 0:
problems.append('NP < 1 or NP > N')
if I[4] != 0:
problems.append('NQ < 1')
elif I[0] == 2:
if I[1] != 0:
problems.append('LDY and/or LDX incorrect')
if I[2] != 0:
problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
if I[3] != 0:
problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
if I[4] != 0:
problems.append('LWORK and/or LIWORK too small')
elif I[0] == 3:
if I[1] != 0:
problems.append('STPB and/or STPD incorrect')
if I[2] != 0:
problems.append('SCLB and/or SCLD incorrect')
if I[3] != 0:
problems.append('WE incorrect')
if I[4] != 0:
problems.append('WD incorrect')
elif I[0] == 4:
problems.append('Error in derivatives')
elif I[0] == 5:
problems.append('Error occurred in callback')
elif I[0] == 6:
problems.append('Numerical error detected')
return problems
else:
return [stopreason]
class Data(object):
"""
scipy.odr.Data(x, y=None, we=None, wd=None, fix=None, meta={})
The Data class stores the data to fit.
Parameters
----------
x : array_like
Input data for regression.
y : array_like, optional
Input data for regression.
we : array_like, optional
If `we` is a scalar, then that value is used for all data points (and
all dimensions of the response variable).
If `we` is a rank-1 array of length q (the dimensionality of the
response variable), then this vector is the diagonal of the covariant
weighting matrix for all data points.
If `we` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th response variable
observation (single-dimensional only).
If `we` is a rank-2 array of shape (q, q), then this is the full
covariant weighting matrix broadcast to each observation.
If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
If the fit is implicit, then only a positive scalar value is used.
wd : array_like, optional
If `wd` is a scalar, then that value is used for all data points
(and all dimensions of the input variable). If `wd` = 0, then the
covariant weighting matrix for each observation is set to the identity
matrix (so each dimension of each observation has the same weight).
If `wd` is a rank-1 array of length m (the dimensionality of the input
variable), then this vector is the diagonal of the covariant weighting
matrix for all data points.
If `wd` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th input variable observation
(single-dimensional only).
If `wd` is a rank-2 array of shape (m, m), then this is the full
covariant weighting matrix broadcast to each observation.
If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
fix : array_like of ints, optional
The `fix` argument is the same as ifixx in the class ODR. It is an
array of integers with the same shape as data.x that determines which
input observations are treated as fixed. One can use a sequence of
length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
meta : dict, optional
Freeform dictionary for metadata.
Notes
-----
Each argument is attached to the member of the instance of the same name.
The structures of `x` and `y` are described in the Model class docstring.
If `y` is an integer, then the Data instance can only be used to fit with
implicit models where the dimensionality of the response is equal to the
specified value of `y`.
The `we` argument weights the effect a deviation in the response variable
has on the fit. The `wd` argument weights the effect a deviation in the
input variable has on the fit. To handle multidimensional inputs and
responses easily, the structure of these arguments has the n'th
dimensional axis first. These arguments heavily use the structured
arguments feature of ODRPACK to conveniently and flexibly support all
options. See the ODRPACK User's Guide for a full explanation of how these
weights are used in the algorithm. Basically, a higher value of the weight
for a particular data point makes a deviation at that point more
detrimental to the fit.
"""
def __init__(self, x, y=None, we=None, wd=None, fix=None, meta={}):
self.x = _conv(x)
self.y = _conv(y)
self.we = _conv(we)
self.wd = _conv(wd)
self.fix = _conv(fix)
self.meta = meta
def set_meta(self, **kwds):
""" Update the metadata dictionary with the keywords and data provided
by keywords.
Examples
--------
data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
"""
self.meta.update(kwds)
def __getattr__(self, attr):
""" Dispatch aatribute access to the metadata dictionary.
"""
if attr in self.meta.keys():
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
class RealData(Data):
""" The RealData class stores the weightings as actual standard deviations
and/or covariances.
The weights needed for ODRPACK are generated on-the-fly with __getattr__
trickery.
sx and sy are standard deviations of x and y and are converted to weights by
dividing 1.0 by their squares.
E.g. wd = 1./numpy.power(sx, 2)
covx and covy are arrays of covariance matrices and are converted to weights
by performing a matrix inversion on each observation's covariance matrix.
E.g. we[i] = numpy.linalg.inv(covy[i]) # i in range(len(covy))
# if covy.shape == (n,q,q)
These arguments follow the same structured argument conventions as wd and we
only restricted by their natures: sx and sy can't be rank-3, but covx and
covy can be.
Only set *either* sx or covx (not both). Setting both will raise an
exception. Same with sy and covy.
The argument and member fix is the same as Data.fix and ODR.ifixx:
It is an array of integers with the same shape as data.x that determines
which input observations are treated as fixed. One can use a sequence of
length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
"""
def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,
fix=None, meta={}):
if (sx is not None) and (covx is not None):
raise ValueError("cannot set both sx and covx")
if (sy is not None) and (covy is not None):
raise ValueError("cannot set both sy and covy")
# Set flags for __getattr__
self._ga_flags = {}
if sx is not None:
self._ga_flags['wd'] = 'sx'
else:
self._ga_flags['wd'] = 'covx'
if sy is not None:
self._ga_flags['we'] = 'sy'
else:
self._ga_flags['we'] = 'covy'
self.x = _conv(x)
self.y = _conv(y)
self.sx = _conv(sx)
self.sy = _conv(sy)
self.covx = _conv(covx)
self.covy = _conv(covy)
self.fix = _conv(fix)
self.meta = meta
def _sd2wt(self, sd):
""" Convert standard deviation to weights.
"""
return 1./numpy.power(sd, 2)
def _cov2wt(self, cov):
""" Convert covariance matrix(-ices) to weights.
"""
from numpy.dual import inv
if len(cov.shape) == 2:
return inv(cov)
else:
weights = numpy.zeros(cov.shape, float)
for i in range(cov.shape[-1]): # n
weights[:,:,i] = inv(cov[:,:,i])
return weights
def __getattr__(self, attr):
lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),
('wd', 'covx'): (self._cov2wt, self.covx),
('we', 'sy'): (self._sd2wt, self.sy),
('we', 'covy'): (self._cov2wt, self.covy)}
if attr not in ('wd', 'we'):
if attr in self.meta.keys():
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
else:
func, arg = lookup_tbl[(attr, self._ga_flags[attr])]
if arg is not None:
return apply(func, (arg,))
else:
return None
class Model(object):
"""
The Model class stores information about the function you wish to fit.
It stores the function itself, at the least, and optionally stores
functions which compute the Jacobians used during fitting. Also, one
can provide a function that will provide reasonable starting values
for the fit parameters possibly given the set of data.
Parameters
----------
fcn : function
fcn(beta, x) --> y
fjacb : function
Jacobian of fcn wrt the fit parameters beta.
fjacb(beta, x) --> @f_i(x,B)/@B_j
fjacd : function
Jacobian of fcn wrt the (possibly multidimensional) input
variable.
fjacd(beta, x) --> @f_i(x,B)/@x_j
extra_args : tuple, optional
If specified, `extra_args` should be a tuple of extra
arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called
by `apply(fcn, (beta, x) + extra_args)`
    estimate : function, optional
Provides estimates of the fit parameters from the data
estimate(data) --> estbeta
implicit : boolean
If TRUE, specifies that the model
is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit
against
meta : dict, optional
freeform dictionary of metadata for the model
Notes
-----
Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and
return a NumPy array. The `estimate` object takes an instance of the
Data class.
Here are the rules for the shapes of the argument and return arrays :
x -- if the input data is single-dimensional, then x is rank-1
array; i.e. x = array([1, 2, 3, ...]); x.shape = (n,)
If the input data is multi-dimensional, then x is a rank-2 array;
i.e., x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n) In
all cases, it has the same shape as the input data array passed to
odr(). m is the dimensionality of the input data, n is the number
of observations.
y -- if the response variable is single-dimensional, then y is a
rank-1 array, i.e., y = array([2, 4, ...]); y.shape = (n,)
If the response variable is multi-dimensional, then y is a rank-2
array, i.e., y = array([[2, 4, ...], [3, 6, ...]]); y.shape =
(q, n) where q is the dimensionality of the response variable.
beta -- rank-1 array of length p where p is the number of parameters;
i.e. beta = array([B_1, B_2, ..., B_p])
fjacb -- if the response variable is multi-dimensional, then the
return array's shape is (q, p, n) such that fjacb(x,beta)[l,k,i] =
@f_l(X,B)/@B_k evaluated at the i'th data point. If q == 1, then
the return array is only rank-2 and with shape (p, n).
fjacd -- as with fjacb, only the return array's shape is (q, m, n)
such that fjacd(x,beta)[l,j,i] = @f_l(X,B)/@X_j at the i'th data
point. If q == 1, then the return array's shape is (m, n). If
m == 1, the shape is (q, n). If m == q == 1, the shape is (n,).
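    For illustration, a minimal sketch of Jacobians for the linear model
    y = B[0]*x + B[1] from the module docstring (so p == 2 and q == m == 1),
    following the shape rules above::
        def f(B, x):
            return B[0]*x + B[1]
        def fjacb(B, x):
            # @f/@B_0 = x, @f/@B_1 = 1; with q == 1 the shape is (p, n)
            return numpy.vstack([x, numpy.ones_like(x)])
        def fjacd(B, x):
            # @f/@x = B[0]; with m == q == 1 the shape is (n,)
            return B[0] * numpy.ones_like(x)
        linear = Model(f, fjacb=fjacb, fjacd=fjacd)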
"""
def __init__(self, fcn, fjacb=None, fjacd=None,
extra_args=None, estimate=None, implicit=0, meta=None):
self.fcn = fcn
self.fjacb = fjacb
self.fjacd = fjacd
if extra_args is not None:
extra_args = tuple(extra_args)
self.extra_args = extra_args
self.estimate = estimate
self.implicit = implicit
self.meta = meta
def set_meta(self, **kwds):
""" Update the metadata dictionary with the keywords and data provided
here.
Examples
--------
set_meta(name="Exponential", equation="y = a exp(b x) + c")
"""
self.meta.update(kwds)
def __getattr__(self, attr):
""" Dispatch attribute access to the metadata.
"""
if attr in self.meta.keys():
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
class Output(object):
"""
The Output class stores the output of an ODR run.
Takes one argument for initialization, the return value from the
function `odr`.
Attributes
----------
beta : ndarray
        Estimated parameter values, of shape (p,).
sd_beta : ndarray
Standard errors of the estimated parameters, of shape (p,).
cov_beta : ndarray
Covariance matrix of the estimated parameters, of shape (p,p).
delta : ndarray, optional
Array of estimated errors in input variables, of same shape as `x`.
eps : ndarray, optional
Array of estimated errors in response variables, of same shape as `y`.
xplus : ndarray, optional
Array of ``x + delta``.
y : ndarray, optional
Array ``y = fcn(x + delta)``.
res_var : float, optional
Residual variance.
    sum_square : float, optional
Sum of squares error.
sum_square_delta : float, optional
Sum of squares of delta error.
sum_square_eps : float, optional
Sum of squares of eps error.
inv_condnum : float, optional
Inverse condition number (cf. ODRPACK UG p. 77).
rel_error : float, optional
Relative error in function values computed within fcn.
work : ndarray, optional
Final work array.
work_ind : dict, optional
Indices into work for drawing out values (cf. ODRPACK UG p. 83).
info : int, optional
Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).
stopreason : list of str, optional
`info` interpreted into English.
Notes
-----
The attributes listed as "optional" above are only present if `odr` was run
with ``full_output=1``.
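    A minimal access sketch, assuming ``myoutput`` is the value returned by
    ``ODR.run()`` (which requests full output)::
        print(myoutput.beta)        # estimated parameters
        print(myoutput.sd_beta)     # standard errors of the parameters
        print(myoutput.stopreason)  # `info` interpreted into English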
"""
def __init__(self, output):
self.beta = output[0]
self.sd_beta = output[1]
self.cov_beta = output[2]
if len(output) == 4:
# full output
self.__dict__.update(output[3])
self.stopreason = _report_error(self.info)
def pprint(self):
""" Pretty-print important results.
"""
print 'Beta:', self.beta
print 'Beta Std Error:', self.sd_beta
print 'Beta Covariance:', self.cov_beta
if hasattr(self, 'info'):
print 'Residual Variance:',self.res_var
print 'Inverse Condition #:', self.inv_condnum
print 'Reason(s) for Halting:'
for r in self.stopreason:
print ' %s' % r
class ODR(object):
"""
The ODR class gathers all information and coordinates the running of the
main fitting routine.
Members of instances of the ODR class have the same names as the arguments
to the initialization routine.
Parameters
----------
data : Data class instance
instance of the Data class
model : Model class instance
instance of the Model class
beta0 : array_like of rank-1
a rank-1 sequence of initial parameter values. Optional if
model provides an "estimate" function to estimate these values.
delta0 : array_like of floats of rank-1, optional
a (double-precision) float array to hold the initial values of
the errors in the input variables. Must be same shape as data.x
ifixb : array_like of ints of rank-1, optional
sequence of integers with the same length as beta0 that determines
which parameters are held fixed. A value of 0 fixes the parameter,
a value > 0 makes the parameter free.
ifixx : array_like of ints with same shape as data.x, optional
an array of integers with the same shape as data.x that determines
which input observations are treated as fixed. One can use a sequence
of length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
job : int, optional
an integer telling ODRPACK what tasks to perform. See p. 31 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_job post-initialization for a more readable interface.
iprint : int, optional
an integer telling ODRPACK what to print. See pp. 33-34 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_iprint post-initialization for a more readable interface.
errfile : str, optional
string with the filename to print ODRPACK errors to. *Do Not Open
This File Yourself!*
rptfile : str, optional
string with the filename to print ODRPACK summaries to. *Do Not
Open This File Yourself!*
ndigit : int, optional
integer specifying the number of reliable digits in the computation
of the function.
taufac : float, optional
float specifying the initial trust region. The default value is 1.
The initial trust region is equal to taufac times the length of the
first computed Gauss-Newton step. taufac must be less than 1.
sstol : float, optional
float specifying the tolerance for convergence based on the relative
change in the sum-of-squares. The default value is eps**(1/2) where eps
is the smallest value such that 1 + eps > 1 for double precision
computation on the machine. sstol must be less than 1.
partol : float, optional
float specifying the tolerance for convergence based on the relative
change in the estimated parameters. The default value is eps**(2/3) for
explicit models and eps**(1/3) for implicit models. partol must be less
than 1.
maxit : int, optional
integer specifying the maximum number of iterations to perform. For
first runs, maxit is the total number of iterations performed and
defaults to 50. For restarts, maxit is the number of additional
iterations to perform and defaults to 10.
stpb : array_like, optional
sequence (len(stpb) == len(beta0)) of relative step sizes to compute
finite difference derivatives wrt the parameters.
stpd : optional
array (stpd.shape == data.x.shape or stpd.shape == (m,)) of relative
step sizes to compute finite difference derivatives wrt the input
variable errors. If stpd is a rank-1 array with length m (the
dimensionality of the input variable), then the values are broadcast to
all observations.
sclb : array_like, optional
        sequence (len(sclb) == len(beta0)) of scaling factors for the
        parameters. The purpose of these scaling factors is to scale all of
the parameters to around unity. Normally appropriate scaling factors
are computed if this argument is not specified. Specify them yourself
if the automatic procedure goes awry.
scld : array_like, optional
array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling
factors for the *errors* in the input variables. Again, these factors
are automatically computed if you do not provide them. If scld.shape ==
(m,), then the scaling factors are broadcast to all observations.
work : ndarray, optional
array to hold the double-valued working data for ODRPACK. When
restarting, takes the value of self.output.work.
iwork : ndarray, optional
array to hold the integer-valued working data for ODRPACK. When
restarting, takes the value of self.output.iwork.
output : Output class instance
        an instance of the Output class containing all of the returned
data from an invocation of ODR.run() or ODR.restart()
"""
def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None):
self.data = data
self.model = model
if beta0 is None:
if self.model.estimate is not None:
self.beta0 = _conv(self.model.estimate(self.data))
else:
raise ValueError(
"must specify beta0 or provide an estimater with the model"
)
else:
self.beta0 = _conv(beta0)
self.delta0 = _conv(delta0)
# These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
# platforms.
# XXX: some other FORTRAN compilers may not agree.
self.ifixx = _conv(ifixx, dtype=numpy.int32)
self.ifixb = _conv(ifixb, dtype=numpy.int32)
self.job = job
self.iprint = iprint
self.errfile = errfile
self.rptfile = rptfile
self.ndigit = ndigit
self.taufac = taufac
self.sstol = sstol
self.partol = partol
self.maxit = maxit
self.stpb = _conv(stpb)
self.stpd = _conv(stpd)
self.sclb = _conv(sclb)
self.scld = _conv(scld)
self.work = _conv(work)
self.iwork = _conv(iwork)
self.output = None
self._check()
def _check(self):
""" Check the inputs for consistency, but don't bother checking things
that the builtin function odr will check.
"""
x_s = list(self.data.x.shape)
if isinstance(self.data.y, numpy.ndarray):
y_s = list(self.data.y.shape)
if self.model.implicit:
raise odr_error("an implicit model cannot use response data")
else:
# implicit model with q == self.data.y
y_s = [self.data.y, x_s[-1]]
if not self.model.implicit:
raise odr_error("an explicit model needs response data")
self.set_job(fit_type=1)
if x_s[-1] != y_s[-1]:
raise odr_error("number of observations do not match")
n = x_s[-1]
if len(x_s) == 2:
m = x_s[0]
else:
m = 1
if len(y_s) == 2:
q = y_s[0]
else:
q = 1
p = len(self.beta0)
# permissible output array shapes
fcn_perms = [(q, n)]
fjacd_perms = [(q, m, n)]
fjacb_perms = [(q, p, n)]
if q == 1:
fcn_perms.append((n,))
fjacd_perms.append((m, n))
fjacb_perms.append((p, n))
if m == 1:
fjacd_perms.append((q, n))
if p == 1:
fjacb_perms.append((q, n))
if m == q == 1:
fjacd_perms.append((n,))
if p == q == 1:
fjacb_perms.append((n,))
# try evaluating the supplied functions to make sure they provide
# sensible outputs
arglist = (self.beta0, self.data.x)
if self.model.extra_args is not None:
arglist = arglist + self.model.extra_args
res = self.model.fcn(*arglist)
if res.shape not in fcn_perms:
print res.shape
print fcn_perms
raise odr_error("fcn does not output %s-shaped array" % y_s)
if self.model.fjacd is not None:
res = self.model.fjacd(*arglist)
if res.shape not in fjacd_perms:
                raise odr_error(
                    "fjacd does not output %s-shaped array" % repr((q, m, n)))
if self.model.fjacb is not None:
res = self.model.fjacb(*arglist)
if res.shape not in fjacb_perms:
                raise odr_error(
                    "fjacb does not output %s-shaped array" % repr((q, p, n)))
# check shape of delta0
if self.delta0 is not None and self.delta0.shape != self.data.x.shape:
raise odr_error(
"delta0 is not a %s-shaped array" % self.data.x.shape)
def _gen_work(self):
""" Generate a suitable work array if one does not already exist.
"""
n = self.data.x.shape[-1]
p = self.beta0.shape[0]
if len(self.data.x.shape) == 2:
m = self.data.x.shape[0]
else:
m = 1
if self.model.implicit:
q = self.data.y
elif len(self.data.y.shape) == 2:
q = self.data.y.shape[0]
else:
q = 1
if self.data.we is None:
ldwe = ld2we = 1
elif len(self.data.we.shape) == 3:
ld2we, ldwe = self.data.we.shape[1:]
else:
# Okay, this isn't precisely right, but for this calculation,
# it's fine
ldwe = 1
ld2we = self.data.we.shape[1]
if self.job % 10 < 2:
# ODR not OLS
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +
2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)
else:
# OLS not ODR
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +
5*q + q*(p+m) + ldwe*ld2we*q)
if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\
and self.work.dtype.str.endswith('f8'):
# the existing array is fine
return
else:
self.work = numpy.zeros((lwork,), float)
def set_job(self, fit_type=None, deriv=None, var_calc=None,
del_init=None, restart=None):
"""
Sets the "job" parameter is a hopefully comprehensible way.
If an argument is not specified, then the value is left as is. The
default value from class initialization is for all of these options set
to 0.
Parameters
----------
fit_type : {0, 1, 2} int
0 -> explicit ODR
1 -> implicit ODR
2 -> ordinary least-squares
deriv : {0, 1, 2, 3} int
0 -> forward finite differences
1 -> central finite differences
2 -> user-supplied derivatives (Jacobians) with results
checked by ODRPACK
3 -> user-supplied derivatives, no checking
var_calc : {0, 1, 2} int
0 -> calculate asymptotic covariance matrix and fit
parameter uncertainties (V_B, s_B) using derivatives
recomputed at the final solution
1 -> calculate V_B and s_B using derivatives from last iteration
2 -> do not calculate V_B and s_B
del_init : {0, 1} int
0 -> initial input variable offsets set to 0
1 -> initial offsets provided by user in variable "work"
restart : {0, 1} int
0 -> fit is not a restart
1 -> fit is a restart
Notes
-----
The permissible values are different from those given on pg. 31 of the
ODRPACK User's Guide only in that one cannot specify numbers greater than
the last value for each variable.
If one does not supply functions to compute the Jacobians, the fitting
procedure will change deriv to 0, finite differences, as a default. To
initialize the input variable offsets by yourself, set del_init to 1 and
put the offsets into the "work" variable correctly.
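        A minimal usage sketch, assuming ``myodr`` is an already constructed
        ODR instance::
            # ordinary least squares with user-supplied derivatives,
            # skipping ODRPACK's derivative checking
            myodr.set_job(fit_type=2, deriv=3)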
"""
if self.job is None:
job_l = [0, 0, 0, 0, 0]
else:
job_l = [self.job / 10000 % 10,
self.job / 1000 % 10,
self.job / 100 % 10,
self.job / 10 % 10,
self.job % 10]
if fit_type in (0, 1, 2):
job_l[4] = fit_type
if deriv in (0, 1, 2, 3):
job_l[3] = deriv
if var_calc in (0, 1, 2):
job_l[2] = var_calc
if del_init in (0, 1):
job_l[1] = del_init
if restart in (0, 1):
job_l[0] = restart
self.job = (job_l[0]*10000 + job_l[1]*1000 +
job_l[2]*100 + job_l[3]*10 + job_l[4])
def set_iprint(self, init=None, so_init=None,
iter=None, so_iter=None, iter_step=None, final=None, so_final=None):
""" Set the iprint parameter for the printing of computation reports.
If any of the arguments are specified here, then they are set in the
iprint member. If iprint is not set manually or with this method, then
ODRPACK defaults to no printing. If no filename is specified with the
member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to
print to stdout in addition to the specified filename by setting the
so_* arguments to this function, but one cannot specify to print to
stdout but not a file since one can do that by not specifying a rptfile
filename.
There are three reports: initialization, iteration, and final reports.
They are represented by the arguments init, iter, and final
respectively. The permissible values are 0, 1, and 2 representing "no
report", "short report", and "long report" respectively.
The argument iter_step (0 <= iter_step <= 9) specifies how often to make
the iteration report; the report will be made for every iter_step'th
iteration starting with iteration one. If iter_step == 0, then no
iteration report is made, regardless of the other arguments.
If the rptfile is None, then any so_* arguments supplied will raise an
exception.
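        A minimal usage sketch, assuming ``myodr`` is an ODR instance::
            # long initialization and final reports, plus a short report
            # every 5th iteration
            myodr.set_iprint(init=2, iter=1, iter_step=5, final=2)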
"""
if self.iprint is None:
self.iprint = 0
ip = [self.iprint / 1000 % 10,
self.iprint / 100 % 10,
self.iprint / 10 % 10,
self.iprint % 10]
# make a list to convert iprint digits to/from argument inputs
# rptfile, stdout
ip2arg = [[0, 0], # none, none
[1, 0], # short, none
[2, 0], # long, none
[1, 1], # short, short
[2, 1], # long, short
[1, 2], # short, long
[2, 2]] # long, long
if (self.rptfile is None and
(so_init is not None or
so_iter is not None or
so_final is not None)):
raise odr_error(
"no rptfile specified, cannot output to stdout twice")
iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]
if init is not None:
iprint_l[0] = init
if so_init is not None:
iprint_l[1] = so_init
if iter is not None:
iprint_l[2] = iter
if so_iter is not None:
iprint_l[3] = so_iter
if final is not None:
iprint_l[4] = final
if so_final is not None:
iprint_l[5] = so_final
if iter_step in range(10):
# 0..9
ip[2] = iter_step
ip[0] = ip2arg.index(iprint_l[0:2])
ip[1] = ip2arg.index(iprint_l[2:4])
ip[3] = ip2arg.index(iprint_l[4:6])
self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]
def run(self):
""" Run the fitting routine with all of the information given.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
"""
args = (self.model.fcn, self.beta0, self.data.y, self.data.x)
kwds = {'full_output': 1}
kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',
'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',
'stpd', 'sclb', 'scld', 'work', 'iwork']
if self.delta0 is not None and self.job % 1000 / 10 == 1:
# delta0 provided and fit is not a restart
self._gen_work()
d0 = numpy.ravel(self.delta0)
self.work[:len(d0)] = d0
# set the kwds from other objects explicitly
if self.model.fjacb is not None:
kwds['fjacb'] = self.model.fjacb
if self.model.fjacd is not None:
kwds['fjacd'] = self.model.fjacd
if self.data.we is not None:
kwds['we'] = self.data.we
if self.data.wd is not None:
kwds['wd'] = self.data.wd
if self.model.extra_args is not None:
kwds['extra_args'] = self.model.extra_args
# implicitly set kwds from self's members
for attr in kwd_l:
obj = getattr(self, attr)
if obj is not None:
kwds[attr] = obj
self.output = Output(apply(odr, args, kwds))
return self.output
def restart(self, iter=None):
""" Restarts the run with iter more iterations.
Parameters
----------
iter : int, optional
ODRPACK's default for the number of new iterations is 10.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
"""
if self.output is None:
raise odr_error("cannot restart: run() has not been called before")
self.set_job(restart=1)
self.work = self.output.work
self.iwork = self.output.iwork
self.maxit = iter
return self.run()
|
ygenc/onlineLDA
|
onlineldavb_new/build/scipy/scipy/odr/odrpack.py
|
Python
|
gpl-3.0
| 39,749
| 0.001157
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_medicament_template_wkf
|
CLVsol/odoo_addons
|
clv_medicament_template/wkf/__init__.py
|
Python
|
agpl-3.0
| 1,439
| 0.011814
|
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
# A DFA walker that knows how to dump them to serialized strings.#/
from io import StringIO
from antlr4.Utils import str_list
class DFASerializer(object):
def __init__(self, dfa, literalNames=None, symbolicNames=None):
self.dfa = dfa
self.literalNames = literalNames
self.symbolicNames = symbolicNames
def __str__(self):
return unicode(self)
def __unicode__(self):
if self.dfa.s0 is None:
return None
with StringIO() as buf:
for s in self.dfa.sortedStates():
n = 0
if s.edges is not None:
n = len(s.edges)
for i in range(0, n):
t = s.edges[i]
if t is not None and t.stateNumber != 0x7FFFFFFF:
buf.write(self.getStateString(s))
label = self.getEdgeLabel(i)
buf.write(u"-")
buf.write(label)
buf.write(u"->")
buf.write(self.getStateString(t))
buf.write(u'\n')
output = buf.getvalue()
if len(output)==0:
return None
else:
return output
def getEdgeLabel(self, i):
if i==0:
return u"EOF"
if self.literalNames is not None and i<=len(self.literalNames):
return self.literalNames[i-1]
elif self.symbolicNames is not None and i<=len(self.symbolicNames):
return self.symbolicNames[i-1]
else:
return unicode(i-1)
def getStateString(self, s):
n = s.stateNumber
baseStateStr = ( u":" if s.isAcceptState else u"") + u"s" + unicode(n) + \
( u"^" if s.requiresFullContext else u"")
if s.isAcceptState:
if s.predicates is not None:
return baseStateStr + u"=>" + str_list(s.predicates)
else:
return baseStateStr + u"=>" + unicode(s.prediction)
else:
return baseStateStr
class LexerDFASerializer(DFASerializer):
def __init__(self, dfa):
super(LexerDFASerializer, self).__init__(dfa, None)
def getEdgeLabel(self, i):
return u"'" + unichr(i) + u"'"
|
sidhart/antlr4
|
runtime/Python2/src/antlr4/dfa/DFASerializer.py
|
Python
|
bsd-3-clause
| 3,848
| 0.002339
|
import requests
import json
class DisqusAPI(object):
"""
Lightweight solution to make API calls to Disqus:
More info:
https://disqus.com/api/docs
"""
def __init__(self,
api_key,
api_secret,
version='3.0',
formats='json'
):
self.api_key = api_key
self.api_secret = api_secret
self.version = version
self.formats = formats
def get(self, method, **kwargs):
"""
Make get requests to retrieve data from Disqus
"""
endpoint = 'https://disqus.com/api/{version}/{method}.{formats}'
url = endpoint.format(
version=self.version,
method=method.replace('.', '/'),
formats=self.formats
)
kwargs.update({
'api_key': self.api_key,
'api_secret': self.api_secret,
})
response = requests.get(url, params=kwargs)
# TODO: support other formats like rss
if self.formats == 'json':
return json.loads(response.content.decode())
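# Minimal usage sketch; the API key, secret and forum shortname below are
# placeholders, not real credentials.
if __name__ == '__main__':
    client = DisqusAPI(api_key='YOUR_PUBLIC_KEY', api_secret='YOUR_SECRET_KEY')
    # 'forums.listThreads' is turned into
    # https://disqus.com/api/3.0/forums/listThreads.json by get()
    print(client.get('forums.listThreads', forum='your-forum-shortname'))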
|
shalakhin/disqus-api
|
disqus_api/api.py
|
Python
|
mit
| 1,082
| 0.005545
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (AureusTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to aureus.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "aureus.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
hideoussquid/aureus-12-bitcore
|
qa/rpc-tests/multi_rpc.py
|
Python
|
mit
| 4,609
| 0.005424
|
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import fields, models
class ResPartner(models.Model):
_inherit = "res.partner"
sale_journal_id = fields.Many2one(
"account.journal", "Default journal", domain=[("type", "=", "sale")]
)
purchase_journal_id = fields.Many2one(
"account.journal", "Default journal", domain=[("type", "=", "purchase")]
)
|
nuobit/odoo-addons
|
partner_default_journal/models/res_partner.py
|
Python
|
agpl-3.0
| 511
| 0.001957
|
import os
import pysam
import unittest
from TestUtils import checkFieldEqual
import copy
SAMTOOLS = "samtools"
WORKDIR = "pysam_test_work"
DATADIR = "pysam_data"
class ReadTest(unittest.TestCase):
def buildRead(self):
'''build an example read.'''
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))
a.next_reference_id = 0
a.next_reference_start = 200
a.template_length = 167
a.query_qualities = pysam.fromQualityString("1234") * 10
# todo: create tags
return a
class TestAlignedSegment(ReadTest):
'''tests to check if aligned read can be constructed
and manipulated.
'''
def testEmpty(self):
a = pysam.AlignedSegment()
self.assertEqual(a.query_name, None)
self.assertEqual(a.query_sequence, None)
self.assertEqual(pysam.toQualityString(a.query_qualities), None)
self.assertEqual(a.flag, 0)
self.assertEqual(a.reference_id, 0)
self.assertEqual(a.mapping_quality, 0)
self.assertEqual(a.cigartuples, None)
self.assertEqual(a.tags, [])
self.assertEqual(a.next_reference_id, 0)
self.assertEqual(a.next_reference_start, 0)
self.assertEqual(a.template_length, 0)
def testStrOfEmptyRead(self):
a = pysam.AlignedSegment()
s = str(a)
self.assertEqual(
"None\t0\t0\t0\t0\tNone\t0\t0\t0\tNone\tNone\t[]",
s)
def testSettingTagInEmptyRead(self):
'''see issue 62'''
a = pysam.AlignedSegment()
a.tags = (("NM", 1),)
a.query_qualities = None
self.assertEqual(a.tags, [("NM", 1), ])
def testCompare(self):
'''check comparison functions.'''
a = self.buildRead()
b = self.buildRead()
self.assertEqual(0, a.compare(b))
self.assertEqual(0, b.compare(a))
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
b.tid = 2
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def testHashing(self):
a = self.buildRead()
b = self.buildRead()
self.assertEqual(hash(a), hash(b))
b.tid = 2
self.assertNotEqual(hash(a), hash(b))
def testUpdate(self):
'''check if updating fields affects other variable length data
'''
a = self.buildRead()
b = self.buildRead()
# check qname
b.query_name = "read_123"
checkFieldEqual(self, a, b, "query_name")
b.query_name = "read_12345678"
checkFieldEqual(self, a, b, "query_name")
b.query_name = "read_12345"
checkFieldEqual(self, a, b)
# check cigar
b.cigartuples = ((0, 10), )
checkFieldEqual(self, a, b, "cigartuples")
b.cigartuples = ((0, 10), (2, 1), (0, 10))
checkFieldEqual(self, a, b, "cigartuples")
b.cigartuples = ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))
checkFieldEqual(self, a, b)
# check seq
b.query_sequence = "ACGT"
checkFieldEqual(self,
a, b,
("query_sequence", "query_qualities", "query_length"))
b.query_sequence = "ACGT" * 3
checkFieldEqual(self,
a, b,
("query_sequence", "query_qualities", "query_length"))
b.query_sequence = "ACGT" * 10
checkFieldEqual(self, a, b, ("query_qualities",))
# reset qual
b = self.buildRead()
# check flags:
for x in (
"is_paired", "is_proper_pair",
"is_unmapped", "mate_is_unmapped",
"is_reverse", "mate_is_reverse",
"is_read1", "is_read2",
"is_secondary", "is_qcfail",
"is_duplicate", "is_supplementary"):
setattr(b, x, True)
self.assertEqual(getattr(b, x), True)
checkFieldEqual(self, a, b, ("flag", x,))
setattr(b, x, False)
self.assertEqual(getattr(b, x), False)
checkFieldEqual(self, a, b)
def testUpdate2(self):
'''issue 135: inplace update of sequence and quality score.
This does not work as setting the sequence will erase
the quality scores.
'''
a = self.buildRead()
a.query_sequence = a.query_sequence[5:10]
self.assertEqual(pysam.toQualityString(a.query_qualities), None)
a = self.buildRead()
s = pysam.toQualityString(a.query_qualities)
a.query_sequence = a.query_sequence[5:10]
a.query_qualities = pysam.fromQualityString(s[5:10])
self.assertEqual(pysam.toQualityString(a.query_qualities), s[5:10])
def testLargeRead(self):
'''build an example read.'''
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 200
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((0, 4 * 200), )
a.next_reference_id = 0
a.next_reference_start = 200
a.template_length = 167
a.query_qualities = pysam.fromQualityString("1234") * 200
return a
def testUpdateTlen(self):
'''check if updating tlen works'''
a = self.buildRead()
oldlen = a.template_length
oldlen *= 2
a.template_length = oldlen
self.assertEqual(a.template_length, oldlen)
def testPositions(self):
a = self.buildRead()
self.assertEqual(a.get_reference_positions(),
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59])
self.assertEqual(a.get_aligned_pairs(),
[(0, 20), (1, 21), (2, 22), (3, 23), (4, 24),
(5, 25), (6, 26), (7, 27), (8, 28), (9, 29),
(None, 30),
(10, 31), (11, 32), (12, 33), (13, 34), (14, 35),
(15, 36), (16, 37), (17, 38), (18, 39), (19, None),
(20, 40), (21, 41), (22, 42), (23, 43), (24, 44),
(25, 45), (26, 46), (27, 47), (28, 48), (29, 49),
(30, 50), (31, 51), (32, 52), (33, 53), (34, 54),
(35, 55), (36, 56), (37, 57), (38, 58), (39, 59)])
self.assertEqual(
a.get_reference_positions(),
[x[1] for x in a.get_aligned_pairs()
if x[0] is not None and x[1] is not None])
# alen is the length of the aligned read in genome
self.assertEqual(a.reference_length,
a.get_aligned_pairs()[-1][0] + 1)
# aend points to one beyond last aligned base in ref
self.assertEqual(a.get_reference_positions()[-1],
a.reference_end - 1)
def testFullReferencePositions(self):
'''see issue 26'''
a = self.buildRead()
a.cigar = [(4, 30), (0, 20), (1, 3), (0, 47)]
self.assertEqual(100,
len(a.get_reference_positions(full_length=True)))
def testBlocks(self):
a = self.buildRead()
self.assertEqual(a.get_blocks(),
[(20, 30), (31, 40), (40, 60)])
def test_get_aligned_pairs_soft_clipping(self):
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((4, 2), (0, 35), (4, 3))
a.query_qualities = pysam.fromQualityString("1234") * 10
self.assertEqual(a.get_aligned_pairs(),
[(0, None), (1, None)] +
[(qpos, refpos) for (qpos, refpos) in zip(
range(2, 2 + 35), range(20, 20 + 35))] +
[(37, None), (38, None), (39, None)]
)
self.assertEqual(a.get_aligned_pairs(True),
# [(0, None), (1, None)] +
[(qpos, refpos) for (qpos, refpos) in zip(
range(2, 2 + 35), range(20, 20 + 35))]
# [(37, None), (38, None), (39, None)]
)
def test_get_aligned_pairs_hard_clipping(self):
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((5, 2), (0, 35), (5, 3))
a.query_qualities = pysam.fromQualityString("1234") * 10
self.assertEqual(a.get_aligned_pairs(),
# No seq, no seq pos
[(qpos, refpos) for (qpos, refpos) in zip(
range(0, 0 + 35), range(20, 20 + 35))])
self.assertEqual(a.get_aligned_pairs(True),
[(qpos, refpos) for (qpos, refpos) in zip(
range(0, 0 + 35), range(20, 20 + 35))])
def test_get_aligned_pairs_skip(self):
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((0, 2), (3, 100), (0, 38))
a.query_qualities = pysam.fromQualityString("1234") * 10
self.assertEqual(a.get_aligned_pairs(),
[(0, 20), (1, 21)] +
[(None, refpos) for refpos in range(22, 22 + 100)] +
[(qpos, refpos) for (qpos, refpos) in zip(
range(2, 2 + 38),
range(20 + 2 + 100, 20 + 2 + 100 + 38))])
self.assertEqual(a.get_aligned_pairs(True),
[(0, 20), (1, 21)] +
# [(None, refpos) for refpos in range(21, 21+100)] +
[(qpos, refpos) for (qpos, refpos) in zip(
range(2, 2 + 38),
range(20 + 2 + 100, 20 + 2 + 100 + 38))])
def test_get_aligned_pairs_match_mismatch(self):
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((7, 20), (8, 20))
a.query_qualities = pysam.fromQualityString("1234") * 10
self.assertEqual(a.get_aligned_pairs(),
[(qpos, refpos) for (qpos, refpos) in zip(
range(0, 0 + 40), range(20, 20 + 40))])
self.assertEqual(a.get_aligned_pairs(True),
[(qpos, refpos) for (qpos, refpos) in zip(
range(0, 0 + 40), range(20, 20 + 40))])
def test_get_aligned_pairs_padding(self):
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((7, 20), (6, 1), (8, 19))
a.query_qualities = pysam.fromQualityString("1234") * 10
def inner():
a.get_aligned_pairs()
        # padding is not being handled right now
self.assertRaises(NotImplementedError, inner)
class TestTags(ReadTest):
def testMissingTag(self):
a = self.buildRead()
self.assertRaises(KeyError, a.get_tag, "XP")
def testEmptyTag(self):
a = self.buildRead()
self.assertRaises(KeyError, a.get_tag, "XT")
def testSetTag(self):
a = self.buildRead()
self.assertEqual(False, a.has_tag("NM"))
a.set_tag("NM", 2)
self.assertEqual(True, a.has_tag("NM"))
self.assertEqual(a.get_tag("NM"), 2)
a.set_tag("NM", 3)
self.assertEqual(a.get_tag("NM"), 3)
a.set_tag("NM", None)
self.assertEqual(False, a.has_tag("NM"))
# check if deleting a non-existing tag is fine
a.set_tag("NM", None)
def testAddTagsType(self):
a = self.buildRead()
a.tags = None
self.assertEqual(a.tags, [])
a.setTag('X1', 5.0)
a.setTag('X2', "5.0")
a.setTag('X3', 5)
self.assertEqual(sorted(a.tags),
sorted([('X1', 5.0),
('X2', "5.0"),
('X3', 5)]))
# test setting float for int value
a.setTag('X4', 5, value_type='d')
self.assertEqual(sorted(a.tags),
sorted([('X1', 5.0),
('X2', "5.0"),
('X3', 5),
('X4', 5.0)]))
# test setting int for float value - the
# value will be rounded.
a.setTag('X5', 5.2, value_type='i')
self.assertEqual(sorted(a.tags),
sorted([('X1', 5.0),
('X2', "5.0"),
('X3', 5),
('X4', 5.0),
('X5', 5)]))
# test setting invalid type code
self.assertRaises(ValueError, a.setTag, 'X6', 5.2, 'g')
def testTagsUpdatingFloat(self):
a = self.buildRead()
a.tags = [('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U')]
self.assertEqual(a.tags,
[('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U')])
a.tags += [('XC', 5.0)]
self.assertEqual(a.tags,
[('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U'), ('XC', 5.0)])
def testAddTags(self):
a = self.buildRead()
a.tags = [('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U')]
self.assertEqual(sorted(a.tags),
sorted([('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U')]))
a.setTag('X1', 'C')
self.assertEqual(sorted(a.tags),
sorted([('X1', 'C'), ('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U'), ]))
a.setTag('X2', 5)
self.assertEqual(sorted(a.tags),
sorted([('X2', 5), ('X1', 'C'),
('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U'), ]))
# add with replacement
a.setTag('X2', 10)
self.assertEqual(sorted(a.tags),
sorted([('X2', 10), ('X1', 'C'),
('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U'), ]))
# add without replacement
a.setTag('X2', 5, replace=False)
self.assertEqual(sorted(a.tags),
sorted([('X2', 10), ('X1', 'C'),
('X2', 5),
('NM', 1), ('RG', 'L1'),
('PG', 'P1'), ('XT', 'U'), ]))
def testTagParsing(self):
'''test for tag parsing
see http://groups.google.com/group/pysam-user-group/browse_thread/thread/67ca204059ea465a
'''
samfile = pysam.AlignmentFile(
os.path.join(DATADIR, "ex8.bam"),
"rb")
for entry in samfile:
before = entry.get_tags()
entry.set_tags(before)
after = entry.get_tags()
self.assertEqual(after, before)
class TestCopy(ReadTest):
def testCopy(self):
a = self.buildRead()
b = copy.copy(a)
        # check if a and b are the same
self.assertEqual(a, b)
# check if they map to different objects
a.query_name = 'ReadA'
b.query_name = 'ReadB'
self.assertEqual(a.query_name, 'ReadA')
self.assertEqual(b.query_name, 'ReadB')
def testDeepCopy(self):
a = self.buildRead()
b = copy.deepcopy(a)
        # check if a and b are the same
self.assertEqual(a, b)
# check if they map to different objects
a.query_name = 'ReadA'
b.query_name = 'ReadB'
self.assertEqual(a.query_name, 'ReadA')
self.assertEqual(b.query_name, 'ReadB')
if __name__ == "__main__":
unittest.main()
|
nlhepler/pysam
|
tests/AlignedSegment_test.py
|
Python
|
mit
| 17,059
| 0.000117
|
from django import views
from django.shortcuts import render, get_object_or_404
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
from .models import *
from .forms import *
import requests
import http
from django.urls import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
class Template404(TemplateView):
template_name = "404.html"
class Home(TemplateView):
template_name = 'home/home.html'
class Topics(views.View):
def get(self, request, *args, **kwargs):
return render(request, "home/resources/topics.html", {'topics': Topic.objects.all()})
class Resources(views.View):
def get(self, request, pk, *args, **kwargs):
topic = get_object_or_404(Topic, pk=pk)
return render(request, "home/resources/resources.html", {'resources': topic.resource_set.all(), 'topic' : topic})
class BlogPostList(views.View):
def get(self, request, *args, **kwargs):
posts = BlogPost.objects.all()
return render(request, "home/blog/index.html", {'posts': posts})
class BlogPostView(views.View):
def get(self, request, pk, *args, **kwargs):
post = get_object_or_404(BlogPost, pk=pk)
return render(request, "home/blog/blog_post.html", {'post': post})
class Leaderboard(views.View):
    def get(self, request, *args, **kwargs):
        users = Users.objects.all()
        for user in users:
            connected = False
            while not connected:
                try:
                    user_name = user.github_handle
                    # The type:pr qualifier belongs inside the q parameter; the
                    # search API caps results at 100 per page.
                    response = requests.get(
                        'https://api.github.com/search/issues?sort=created&q=author:{}+type:pr&per_page=100'.format(user_name),
                        verify=False).json()
                    pr_count = 0
                    print(response)
                    for obj in response['items']:
                        if 'pull_request' in obj:
                            if '2018-09-30T00:00:00Z' < obj['created_at'] < '2018-10-31T23:59:59Z':
                                pr_count += 1
                    user.pr_count = pr_count
                    user.save()
                    connected = True
                except (requests.RequestException, KeyError, ValueError):
                    # retry on network or parse errors instead of swallowing every exception
                    pass
        return render(request, 'home/leaderboard.html', {'users': users})
class RegisterUser(CreateView):
form_class = RegisterUserForm
template_name = "home/registeruser.html"
success_url = reverse_lazy('home:home')
@csrf_exempt
def GithubEmailCheck(request):
github_handle = request.POST.get('github_handle')
email = request.POST.get('email')
print("Received ", github_handle)
users = Users.objects.all()
for user in users:
if user.github_handle == github_handle:
return JsonResponse({'message' : 'Duplicate Github Handle'})
if user.email == email:
return JsonResponse({'message' : 'Duplicate Email'})
return JsonResponse({'message' : 'New'})
@csrf_exempt
def GithubCheck(request):
github_handle = request.POST.get('github_handle')
response = requests.get("https://api.github.com/users/{}".format(github_handle), verify = False).json()
print("https://api.github.com/users/{}".format(github_handle))
if ('login' in response):
print("Found")
return JsonResponse({'message' : 'Found'})
else:
return JsonResponse({'message' : 'Not Found'})
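# Editor's note (a hedged sketch, not part of the original file): both csrf_exempt views
# above expect form-encoded POST data; the actual URL patterns live in the project's
# urls.py and are not shown here.
#
#   GithubEmailCheck  POST {github_handle, email} -> {"message": "New"} or
#                     {"message": "Duplicate Github Handle"} / {"message": "Duplicate Email"}
#   GithubCheck       POST {github_handle}        -> {"message": "Found"} or {"message": "Not Found"}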
|
pclubuiet/website
|
home/views.py
|
Python
|
gpl-3.0
| 3,396
| 0.008539
|
import sys
r, c = map(int, input().split())
while r and c:
lines = [input().strip() for i in range(r)]
rotatedLines = []
for i in range(c):
rotatedLines.append("".join([lines[j][i] for j in range(r)]))
rotatedLines.sort(key=lambda s: s.lower())
for i in range(r):
print("".join([rotatedLines[j][i] for j in range(c)]))
print()
r, c = map(int, input().split())
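# Editor's note (a hedged illustration, not part of the original submission): the loop
# above reads an r x c character grid, turns each column into a string, sorts those
# column-strings case-insensitively, and prints the grid back out column by column,
# stopping at the terminating "0 0" line. A small hand-checked example:
#
#   stdin:        columns read top-down:   sorted (case-insensitive):   stdout:
#   2 3           "cA", "ab", "bc"         "ab", "bc", "cA"             abc
#   cab                                                                 bcA
#   Abc                                                                 (blank line)
#   0 0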
|
SirDavidLudwig/KattisSolutions
|
problems/sidewayssorting/sidewayssorting.py
|
Python
|
gpl-3.0
| 372
| 0.024194
|
import json
import sqlite3
def get_room(id, dbfile):
ret = None
con = sqlite3.connect(dbfile)
for row in con.execute("select json from rooms where id=?",(id,)):
jsontext = row[0]
# Outputs the JSON response
#print("json = " + jsontext)
d = json.loads(jsontext)
d['id'] = id
ret = Room(**d)
break
con.close()
return ret
class Room():
    def __init__(self, id=0, name="A room", description="An empty room", neighbors=None):
        self.id = id
        self.name = name
        self.description = description
        # use None as the default to avoid sharing one mutable dict between instances
        self.neighbors = neighbors if neighbors is not None else {}
def _neighbor(self, direction):
if direction in self.neighbors:
return self.neighbors[direction]
else:
return None
def north(self):
return self._neighbor('n')
def south(self):
return self._neighbor('s')
def east(self):
return self._neighbor('e')
def west(self):
return self._neighbor('w')
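# Editor's note: a minimal usage sketch, not part of the original module. It shows the
# JSON shape get_room() expects in the "rooms" table and basic navigation on the Room it
# builds; the table layout mirrors the query above, and the demo data is invented.
if __name__ == "__main__":
    import os
    import tempfile

    demo_db = os.path.join(tempfile.mkdtemp(), "rooms.db")
    con = sqlite3.connect(demo_db)
    con.execute("create table rooms (id integer primary key, json text)")
    con.execute(
        "insert into rooms (id, json) values (?, ?)",
        (1, json.dumps({"name": "Foyer",
                        "description": "A dusty entrance hall",
                        "neighbors": {"n": 2}})))
    con.commit()
    con.close()

    room = get_room(1, demo_db)
    print(room.name, "->", room.north())   # expected output: Foyer -> 2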
|
mbramr/My-Zork
|
room.py
|
Python
|
mit
| 1,035
| 0.017391
|
# Generated by Django 2.0.5 on 2018-05-10 22:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("studies", "0001_initial")]
operations = [migrations.AlterModelOptions(name="extension", options={"ordering": ("-id",)})]
|
mozilla/normandy
|
normandy/studies/migrations/0002_auto_20180510_2256.py
|
Python
|
mpl-2.0
| 272
| 0.003676
|
from markupsafe import escape
import re
from pymongo.objectid import ObjectId
from pymongo.errors import InvalidId
from app.people.people_model import People
from app.board.board_model import BoardTopic, BoardNode
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from lib.filter import none2string,mentions,video, urlink
from lib.utils import html_escape, br_escape
cache_opts = {
'cache.type': 'file',
'cache.data_dir': '/tmp/caches/data',
'cache.lock_dir': '/tmp/caches/lock',
'cache.regions': 'short_term, long_term',
#'cache.short_term.type': 'ext:memcached',
#'cache.short_term.url': '127.0.0.1.11211',
'cache.short_term.type': 'file',
'cache.short_term.expire': '1200',
'cache.long_term.type': 'file',
'cache.long_term.expire': '3600',
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
@cache.region('short_term', 'cached_people')
def get_cached_people(people_id):
try:
people = People.objects.with_id(people_id)
return people
except InvalidId, error:
pass
return None
def fetch_cached_people(people_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_people, None, 'cached_people', people_id)
return get_cached_people(people_id)
@cache.region('long_term', 'cached_board_topic')
def get_cached_board_topic(topic_id):
try:
topic = BoardTopic.objects.with_id(topic_id)
if topic is None:
return None
if topic.content:
topic.html_content = urlink(escape(topic.content)) #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
else:
topic.html_content = ''
return topic
except Exception, error:
return None
return None
def fetch_cached_board_topic(topic_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_topic, None, 'cached_board_topic', topic_id)
return get_cached_board_topic(topic_id)
@cache.region('long_term', 'cached_board_topic_morecontent')
def get_cached_board_topic_morecontent(topic_id):
try:
topic = fetch_cached_board_topic(topic_id)
if topic is None:
return None
html_more_content = ''
if topic.more_content:
html_more_content = br_escape(urlink(escape(topic.more_content))) #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
extra_content = ''
if topic.video_urls:
video_html = '<p></p>'
for url in topic.video_urls:
video_html += video(url)
extra_content = video_html
return html_more_content + extra_content
except Exception, error:
return None
return None
def fetch_cached_board_topic_morecontent(topic_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_topic, None, 'cached_board_topic_morecontent', topic_id)
return get_cached_board_topic_morecontent(topic_id)
@cache.region('long_term', 'cached_board_nodelist')
def get_cached_board_nodelist(cache='board_nodelist'):
try:
nodelist = BoardNode.get_top_nodes()
return list(nodelist)
except InvalidId, error:
pass
return None
def fetch_cached_board_nodelist(reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_nodelist, None, 'cached_board_nodelist', 'board_nodelist')
return get_cached_board_nodelist('board_nodelist')
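# Editor's note (a hedged usage sketch, not part of the original module): callers normally
# read through the cache, and pass reflush=True to invalidate the Beaker region entry so
# the next read repopulates it from MongoDB, e.g. after a topic has been edited:
#
#   topic = fetch_cached_board_topic(topic_id)                # cached read
#   ...                                                       # topic gets updated elsewhere
#   topic = fetch_cached_board_topic(topic_id, reflush=True)  # drop the entry, then reload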
|
feilaoda/FlickBoard
|
project/cache/files.py
|
Python
|
mit
| 3,576
| 0.007271
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import signal
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers.io import ListenAndServ
from paddle.fluid.layers.io import Recv
from paddle.fluid.layers.io import Send
import paddle.fluid.layers.ops as ops
from paddle.fluid.transpiler.details import program_to_code
class TestProgram2Code(unittest.TestCase):
def test_print(self):
place = fluid.CPUPlace()
self.init_serv(place)
self.init_client(place, 9123)
def init_serv(self, place):
main = fluid.Program()
with fluid.program_guard(main):
serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False)
with serv.do():
out_var = main.global_block().create_var(
name="scale_0.tmp_0",
                    persistable=True,
dtype="float32",
shape=[32, 32])
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
ops._scale(x=x, scale=10.0, out=out_var)
program_to_code(main)
def init_client(self, place, port):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
get_var = main.global_block().create_var(
name="scale_0.tmp_0", # server side var
dtype="float32",
persistable=False,
shape=[32, 32])
fluid.initializer.Constant(value=2.3)(get_var, main.global_block())
Send("127.0.0.1:%d" % port, [x])
o = Recv("127.0.0.1:%d" % port, [get_var])
program_to_code(main)
if __name__ == "__main__":
unittest.main()
|
tensor-tang/Paddle
|
python/paddle/fluid/tests/unittests/test_program_code.py
|
Python
|
apache-2.0
| 2,769
| 0
|
__authors__ = ""
__copyright__ = "(c) 2014, pymal"
__license__ = "BSD License"
__contact__ = "Name Of Current Guardian of this file <email@address>"
USER_AGENT = 'api-indiv-0829BA2B33942A4A5E6338FE05EFB8A1'
HOST_NAME = "http://myanimelist.net"
DEBUG = False
RETRY_NUMBER = 4
RETRY_SLEEP = 1
SHORT_SITE_FORMAT_TIME = '%b %Y'
LONG_SITE_FORMAT_TIME = '%b %d, %Y'
MALAPPINFO_FORMAT_TIME = "%Y-%m-%d"
MALAPPINFO_NONE_TIME = "0000-00-00"
MALAPI_FORMAT_TIME = "%Y%m%d"
MALAPI_NONE_TIME = "00000000"
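# Editor's note (illustrative only, not part of the original constants): the *_FORMAT_TIME
# values are ordinary strptime/strftime patterns, and the *_NONE_TIME values appear to be
# the sentinels the corresponding MAL endpoints return when a date is unset. For example:
#
#   from datetime import datetime
#   datetime.strptime("Sep 25, 2014", LONG_SITE_FORMAT_TIME)    # -> datetime(2014, 9, 25, 0, 0)
#   datetime.strptime("20140925", MALAPI_FORMAT_TIME).strftime(MALAPPINFO_FORMAT_TIME)  # -> "2014-09-25"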
|
pymal-developers/pymal
|
pymal/consts.py
|
Python
|
bsd-3-clause
| 496
| 0
|
from unittest import TestCase
import os
from opencog.atomspace import AtomSpace, TruthValue, Atom, types
from opencog.bindlink import stub_bindlink, bindlink, single_bindlink,\
first_n_bindlink, af_bindlink, \
satisfaction_link, satisfying_set, \
satisfying_element, first_n_satisfying_set, \
execute_atom, evaluate_atom
from opencog.type_constructors import *
from opencog.utilities import initialize_opencog, finalize_opencog
from test_functions import green_count, red_count
__author__ = 'Curtis Faith'
class BindlinkTest(TestCase):
bindlink_atom = None
getlink_atom = None
atomspace = AtomSpace()
starting_size = 0
def setUp(self):
print "setUp - atomspace = ", self.atomspace
# Clear atoms from previous test
self.atomspace.clear()
# Initialize Python
initialize_opencog(self.atomspace)
set_type_ctor_atomspace(self.atomspace)
# Define several animals and something of a different type as well
InheritanceLink( ConceptNode("Frog"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Zebra"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Deer"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Spaceship"), ConceptNode("machine"))
# Define a graph search query
self.bindlink_atom = \
BindLink(
# The variable node to be grounded.
VariableNode("$var"),
# The pattern to be grounded.
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
),
# The grounding to be returned.
VariableNode("$var")
# bindlink needs a handle
)
# Define a pattern to be grounded
self.getlink_atom = \
GetLink(
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
)
)
# Remember the starting atomspace size.
self.starting_size = self.atomspace.size()
def tearDown(self):
print "tearDown - atomspace = ", self.atomspace
# Can't do this; finalize can be called only once, ever, and
# then never again. The second call will never follow through.
# Also, cannot create and delete atomspaces here; this will
# confuse the PythonEval singletonInstance.
# finalize_opencog()
# del self.atomspace
def test_stub_bindlink(self):
# Remember the starting atomspace size. This test should not
# change the atomspace.
starting_size = self.atomspace.size()
# Run bindlink.
atom = stub_bindlink(self.atomspace, self.bindlink_atom)
self.assertTrue(atom is not None)
# Check the ending atomspace size, it should be the same.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, starting_size)
def _check_result_setlink(self, atom, expected_arity):
# Check if the atom is a SetLink
self.assertTrue(atom is not None)
self.assertEquals(atom.type, types.SetLink)
# Check the ending atomspace size, it should have added one SetLink.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, self.starting_size + 1)
# The SetLink should have expected_arity items in it.
self.assertEquals(atom.arity, expected_arity)
def test_bindlink(self):
atom = bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 3)
def test_single_bindlink(self):
atom = single_bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_bindlink(self):
atom = first_n_bindlink(self.atomspace, self.bindlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_af_bindlink(self):
atom = af_bindlink(self.atomspace, self.bindlink_atom)
        # The SetLink is empty. Should it be?
self._check_result_setlink(atom, 0)
def test_satisfying_set(self):
atom = satisfying_set(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 3)
def test_satisfying_element(self):
atom = satisfying_element(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_satisfying_set(self):
atom = first_n_satisfying_set(self.atomspace, self.getlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_satisfy(self):
satisfaction_atom = SatisfactionLink(
VariableList(), # no variables
SequentialAndLink(
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("red light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("traffic ticket")
)
)
)
)
atom = satisfaction_link(self.atomspace, satisfaction_atom)
self.assertTrue(atom is not None and atom.mean <= 0.5)
self.assertEquals(green_count(), 2)
self.assertEquals(red_count(), 1)
def test_execute_atom(self):
result = execute_atom(self.atomspace,
ExecutionOutputLink(
GroundedSchemaNode("py: test_functions.add_link"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
list_link = ListLink(
ConceptNode("one"),
ConceptNode("two")
)
self.assertEquals(result, list_link)
def test_evaluate_atom(self):
result = evaluate_atom(self.atomspace,
EvaluationLink(
GroundedPredicateNode("py: test_functions.bogus_tv"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
self.assertEquals(result, TruthValue(0.6, 0.234))
|
inflector/atomspace
|
tests/cython/bindlink/test_bindlink.py
|
Python
|
agpl-3.0
| 7,002
| 0.001285
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "i_pogorelko"
__email__ = "i.pogorelko@gmail.com"
__date__ = "2014-11-16"
text='Proin eget tortor risus. Cras ultricies ligula sed magna dictum porta.\
Donec rutrum congue leo eget malesuada.'
def percentage_1(text):
print ''
print 'input: ', text
text = text.lower()
text2 = ''
for x in text:
if ord(x) >= ord('a') and ord(x) <= ord('z'):
text2 = text2 + x
d = {}
m = 0
for j in text2:
if d.has_key(j):
d[j] += 1.0
else:
d[j] = 1.0
m += 1
for key in d:
d[key] = float("%.1f" % ((d[key]/m)*100))
print '\noutput: ', d
return d
def percentage_2(text):
return percentage_1(text)
percentage_1(text)
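# Editor's note (hedged, not part of the original homework): percentage_1 keeps only the
# ASCII letters, lower-cases them, and returns each letter's share of that filtered text,
# rounded to one decimal place. For example:
#
#   percentage_1("Aa b!")   ->   {'a': 66.7, 'b': 33.3}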
|
pybursa/homeworks
|
i_pogorelko/hw4_i_pogorelko/hw4_solution1.py
|
Python
|
gpl-2.0
| 804
| 0.008706
|
#parser_testing.py
import os, sys, re, StringIO
sys.path.append('/Users/Jason/Dropbox/JournalMap/scripts/GeoParsers')
#from jmap_geoparser_re import *
from jmap_geoparser import *
#def test_parsing():
test = "blah blah blah 45º 23' 12'', 123º 23' 56'' and blah blah blah 32º21'59''N, 115º 23' 14''W blah blah blah"
coords = coordinateParser.searchString(test)
for coord in coords:
assert coordinate(coord).calcDD(), "Coordinate Transform Error for "+str(coord)
test = "45.234º, 123.43º"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 45.234, 'longitude': 123.43}
test = "-45º 23' 12'', -123º 23' 56''"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.38667, 'longitude': 123.39889}
test = "32º21'59''N, 115º 23' 14''W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 32.36639, 'longitude': -115.38722}
test = "12 43 56 North, 23 56 12 East"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 12.73222, 'longitude': 23.93667}
test = "52 15 10N, 0 01 54W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.25278, 'longitude': -0.03167}
test = "52 35 31N, 1 28 05E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.59194, 'longitude': 1.46806}
test = "30° 47' N, 34° 46' E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 30.78333, 'longitude': 34.76667}
'''
test = "AT; 1 spm, CN 3-41, 21°00′ N, 112°30′ E"
for result, start, end in coordinateParser.scanString(test):
assert coordinate(result).calcDD() == {'latitude': 21.0, 'longitude': 112.5}
test = '27°43.886, 34°15.663'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 27.73143, 'longitude': 34.26105}
test = '49°17’13”N, 13°40’18”E'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 49.28694, 'longitude': 13.67167}
test = '45.9215º; -76.6219º'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.9215, 'longitude': 76.6219}
test = "latitude 32°47′47″ S and longitude 26°50′56″ E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -32.79639, 'longitude': 26.84889}
test = "N15°46′ W87°00'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 15.76667, 'longitude': -87.0}
test = "latitude of 35°13', longitude of 4°11'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 35.21667, 'longitude': 4.18333}
test = "expects to find coordinates: 52 degrees, 42 minutes north, 124 degrees, 50 minutes west"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.7, 'longitude': -124.83333}
# Should return an exception, but instead calculates latitude as 6º 10'
#test = "expects to find coordinates: 5°70'N, 73°46'W" # Minutes greater than 60
#test = "expects not to find: 4.5–5.0 "
'''
|
JournalMap/GeoParsers
|
pyparser_geoparser_testing.py
|
Python
|
gpl-2.0
| 3,064
| 0.011624
|
import binascii
def b2h(the_bytes):
return binascii.hexlify(the_bytes).decode("utf8")
def b2h_rev(the_bytes):
return binascii.hexlify(bytearray(reversed(the_bytes))).decode("utf8")
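# Editor's note: a small usage sketch, not part of the original module. b2h_rev is the
# byte-reversed variant conventionally used when displaying little-endian values such as
# transaction hashes; the sample bytes below are arbitrary.
if __name__ == "__main__":
    sample = b"\x00\x01\xff"
    print(b2h(sample))      # "0001ff" -- bytes in their original order
    print(b2h_rev(sample))  # "ff0100" -- bytes reversed before hex encoding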
|
maraoz/proofofexistence
|
pycoin/serialize/__init__.py
|
Python
|
mit
| 200
| 0.01
|
# -*- coding: utf-8 -*-
__author__ = 'frank'
from flask import Flask, request, url_for, render_template, g, session, flash
from flask_wtf.csrf import CsrfProtect
from flask_debugtoolbar import DebugToolbarExtension
from flask.ext.login import LoginManager
from flask.ext.moment import Moment
from . import filters, permissions
from .utils import signout_user
from .config import load_config
config = load_config()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.signin'
login_manager.login_message = '请先登陆以获得相应操作权限'
# convert python's encoding to utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def register_login_manager(app):
"""注册login_manager"""
login_manager.init_app(app)
def register_jinja(app):
# inject vars into template context
@app.context_processor
def inject_vars():
from .models import Category
from datetime import date
year = date.today().strftime("%Y")
return dict(Permission=permissions.Permission, categories=Category.query.all(), year=year)
# url generator for pagination
def url_for_other_page(page):
"""Generate url for pagination"""
view_args = request.view_args.copy()
args = request.args.copy().to_dict()
combined_args = dict(view_args.items() + args.items())
combined_args['page'] = page
return url_for(request.endpoint, **combined_args)
# same effect
# args = request.args.copy()
# args['page'] = page
# return url_for(request.endpoint, **args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
def register_routes(app):
from .controllers import admin, site, user, auth
app.register_blueprint(site.bp, url_prefix='')
app.register_blueprint(admin.bp, url_prefix='/admin')
app.register_blueprint(user.bp, url_prefix='/user')
app.register_blueprint(auth.bp, url_prefix='/auth')
def register_error_handle(app):
@app.errorhandler(403)
def page_403(error):
return render_template('site/403.html'), 403
@app.errorhandler(404)
def page_404(error):
return render_template('site/404.html'), 404
@app.errorhandler(405)
def page_405(error):
return render_template('site/405.html'), 405
@app.errorhandler(500)
def page_500(error):
return render_template('site/500.html'), 500
def register_db(app):
from .models import db
db.init_app(app)
def register_mail(app):
from .utils import mail
mail.init_app(app)
def get_mail_handler():
import logging
from logging.handlers import SMTPHandler
credentials = (config.SMTP_USER, config.SMTP_PASSWORD)
mail_handler = SMTPHandler(config.SMTP_SERVER, config.SMTP_FROM,
config.SMTP_ADMIN, 'sf-log', credentials, ())
from logging import Formatter
mail_handler.setFormatter(Formatter('''
Message type: %(levelname)s
Location: %(pathname)s:%(lineno)d
Module: %(module)s
Function: %(funcName)s
Time: %(asctime)s
Message:
%(message)s
'''))
mail_handler.setLevel(logging.ERROR)
return mail_handler
def register_logger(app):
"""send error log to admin by smtp"""
if not app.debug:
app.logger.addHandler(get_mail_handler())
def register_moment(app):
moment = Moment(app)
def get_current_user():
"""获取当前user,同时进行session有效性的检测
放在utils.py会造成环路引用
"""
# 对应utils.py中signin_user方法
# 豆瓣登陆则验证邮箱, 非豆瓣登陆不验证邮箱直接返回空值退出
if 'signin_method' not in session:
return None
if 'user_id' not in session:
return None
# else:
# for k,v in session.iteritems():
# print k,v
from .models import User
    # note: this is user_id, not douban_id
user = User.query.filter(User.id == session['user_id']).first()
if not user:
signout_user()
return None
return user
def create_app():
app = Flask(__name__)
app.config.from_object(config)
# CSRF protect
CsrfProtect(app)
if app.debug:
DebugToolbarExtension(app)
register_jinja(app)
register_routes(app)
register_error_handle(app)
register_db(app)
register_logger(app)
register_login_manager(app)
register_moment(app)
register_mail(app)
app.jinja_env.filters['markdown'] = filters.markdown
app.jinja_env.filters['normalize'] = filters.normalize
app.jinja_env.filters[
'engrolename_chinrolename'] = filters.engrolename_chinrolename
app.jinja_env.filters['ismyself'] = filters.ismyself
@app.before_request
def before_request():
g.user = get_current_user()
if g.user:
if not g.user.is_activated:
flash('账户尚未激活,请先登陆' + g.user.email + '查收验证邮件并激活账户')
signout_user()
if g.user.is_banned:
flash('账户已被禁用, 请联系管理员')
signout_user()
return app
app = create_app()
|
Fansion/sharefun
|
sharefun/__init__.py
|
Python
|
mit
| 5,284
| 0.000593
|
from django.http import HttpResponse, HttpRequest
from typing import Optional
import ujson
from django.utils.translation import ugettext as _
from zerver.lib.actions import do_mute_topic, do_unmute_topic
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.lib.topic_mutes import topic_is_muted
from zerver.lib.streams import (
access_stream_by_id,
access_stream_by_name,
access_stream_for_unmute_topic_by_id,
access_stream_for_unmute_topic_by_name,
check_for_exactly_one_stream_arg,
)
from zerver.lib.validator import check_int
from zerver.models import get_stream, Stream, UserProfile
def mute_topic(user_profile: UserProfile,
stream_id: Optional[int],
stream_name: Optional[str],
topic_name: str) -> HttpResponse:
if stream_name is not None:
(stream, recipient, sub) = access_stream_by_name(user_profile, stream_name)
else:
assert stream_id is not None
(stream, recipient, sub) = access_stream_by_id(user_profile, stream_id)
if topic_is_muted(user_profile, stream.id, topic_name):
return json_error(_("Topic already muted"))
do_mute_topic(user_profile, stream, recipient, topic_name)
return json_success()
def unmute_topic(user_profile: UserProfile,
stream_id: Optional[int],
stream_name: Optional[str],
topic_name: str) -> HttpResponse:
error = _("Topic is not muted")
if stream_name is not None:
stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)
else:
assert stream_id is not None
stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)
if not topic_is_muted(user_profile, stream.id, topic_name):
return json_error(error)
do_unmute_topic(user_profile, stream, topic_name)
return json_success()
@has_request_variables
def update_muted_topic(request: HttpRequest,
user_profile: UserProfile,
stream_id: Optional[int]=REQ(validator=check_int, default=None),
stream: Optional[str]=REQ(default=None),
topic: str=REQ(),
op: str=REQ()) -> HttpResponse:
check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)
if op == 'add':
return mute_topic(
user_profile=user_profile,
stream_id=stream_id,
stream_name=stream,
topic_name=topic,
)
elif op == 'remove':
return unmute_topic(
user_profile=user_profile,
stream_id=stream_id,
stream_name=stream,
topic_name=topic,
)
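# Editor's note (a hedged illustration, not part of the original view): update_muted_topic
# expects exactly one of `stream_id` / `stream`, plus `topic` and `op`, in the request;
# for example op=add&stream_id=7&topic=lunch mutes the topic and op=remove with the same
# arguments unmutes it. The URL route itself is declared elsewhere and is not shown here.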
|
dhcrzf/zulip
|
zerver/views/muting.py
|
Python
|
apache-2.0
| 2,792
| 0.005372
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import operator
import optparse
import os
import sys
import time
import yaml
import re
import getpass
import subprocess
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes
from ansible.utils.display import Display
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
#FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
class CLI(object):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = ['No Actions']
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
def __init__(self, args, display=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
if display is None:
self.display = Display()
else:
self.display = display
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0,len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
def parse(self):
raise Exception("Need to implement!")
def run(self):
if self.options.verbosity > 0:
self.display.display("Using %s as config file" % C.CONFIG_FILE)
@staticmethod
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
''' prompt for vault password and/or password change '''
vault_pass = None
new_vault_pass = None
try:
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
raise errors.AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise errors.AnsibleError("Passwords do not match")
except EOFError:
pass
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
return vault_pass, new_vault_pass
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % op.become_method.upper()
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
def validate_conflicts(self, vault_opts=False, runas_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_file):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if (op.su or op.su_user or op.ask_su_pass) and \
(op.sudo or op.sudo_user or op.ask_sudo_pass) or \
(op.su or op.su_user or op.ask_su_pass) and \
(op.become or op.become_user or op.become_ask_pass) or \
(op.sudo or op.sudo_user or op.ask_sudo_pass) and \
(op.become or op.become_user or op.become_ask_pass):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
" are exclusive of each other")
@staticmethod
def expand_tilde(option, opt, value, parser):
setattr(parser.values, option.dest, os.path.expanduser(value))
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None, fork_opts=False):
''' create an options parser for most ansible scripts '''
        #FIXME: implement epilog parsing
#OptionParser.format_epilog = lambda self, formatter: self.epilog
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"))
parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if runtask_opts:
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None,
action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
if fork_opts:
parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if vault_opts:
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
dest='vault_password_file', help="vault password file", action="callback",
callback=CLI.expand_tilde, type=str)
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default='all',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
# priv user defaults to root later on to enable detecting when this option was given here
parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
parser.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (nopasswd implied)")
parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string',
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
parser.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if connect_opts:
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for connection password')
parser.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
if meta_opts:
parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache")
return parser
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
            # expensive call, use with care
            ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
@staticmethod
def pager(text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
print(text)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
print(text)
else:
CLI.pager_pipe(text, os.environ['PAGER'])
elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
CLI.pager_pipe(text, 'less')
else:
print(text)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=text)
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(self, text):
t = self._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = self._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = self._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = self._URL.sub(r"\1", t) # U(word) => word
t = self._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
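# Editor's note: a hedged sketch (illustrative only, not part of this file) of how the
# helpers above are typically combined by the bin/ansible* subclasses; the option groups
# passed to base_parser are the ones defined in that method.
#
#   class MyCLI(CLI):
#       def parse(self):
#           self.parser = CLI.base_parser(usage="%prog <host-pattern> [options]",
#                                         runas_opts=True, connect_opts=True,
#                                         vault_opts=True, fork_opts=True)
#           self.options, self.args = self.parser.parse_args(self.args)
#           self.normalize_become_options()
#           self.validate_conflicts(vault_opts=True, runas_opts=True)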
|
woltage/ansible
|
lib/ansible/cli/__init__.py
|
Python
|
gpl-3.0
| 20,703
| 0.007149
|
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.test import TestCase, override_settings
from django.utils import timezone
from django_dynamic_fixture import G
from apps.events.models import AttendanceEvent, Event
class EventOrderedByRegistrationTestCase(TestCase):
def setUp(self):
self.FEATURED_TIMEDELTA_SETTINGS = settings
# Override settings so that the tests will work even if we update the default delta
self.FEATURED_TIMEDELTA_SETTINGS.OW4_SETTINGS['events']['OW4_EVENTS_FEATURED_DAYS_FUTURE'] = 7
self.FEATURED_TIMEDELTA_SETTINGS.OW4_SETTINGS['events']['OW4_EVENTS_FEATURED_DAYS_PAST'] = 7
def test_registration_no_push_forward(self):
"""
Tests that an AttendanceEvent with registration date far in the future is sorted by its event end date,
like any other event.
"""
today = timezone.now()
month_ahead = today + datetime.timedelta(days=30)
month_ahead_plus_five = month_ahead + datetime.timedelta(days=5)
normal_event = G(Event, event_start=month_ahead, event_end=month_ahead)
pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five)
G(AttendanceEvent, registration_start=month_ahead_plus_five, registration_end=month_ahead_plus_five,
event=pushed_event)
expected_order = [normal_event, pushed_event]
with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS):
self.assertEqual(list(Event.by_registration.all()), expected_order)
def test_registration_start_pushed_forward(self):
"""
Tests that an AttendanceEvent with registration date within the "featured delta" (+/- 7 days from today)
will be pushed ahead in the event list, thus sorted by registration start rather than event end.
"""
today = timezone.now()
three_days_ahead = today + datetime.timedelta(days=3)
month_ahead = today + datetime.timedelta(days=30)
month_ahead_plus_five = month_ahead + datetime.timedelta(days=5)
normal_event = G(Event, event_start=month_ahead, event_end=month_ahead)
pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five)
G(AttendanceEvent, registration_start=three_days_ahead, registration_end=three_days_ahead, event=pushed_event)
expected_order = [pushed_event, normal_event]
with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS):
self.assertEqual(list(Event.by_registration.all()), expected_order)
def test_registration_past_push_forward(self):
"""
Tests that an AttendanceEvent with a registration date in the past, outside the "featured delta" (+/- 7 days)
will be sorted by the event's end date.
"""
today = timezone.now()
month_ahead = today + datetime.timedelta(days=30)
month_ahead_plus_five = month_ahead + datetime.timedelta(days=5)
month_back = today - datetime.timedelta(days=30)
normal_event = G(Event, event_start=month_ahead, event_end=month_ahead)
pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five)
G(AttendanceEvent, registration_start=month_back, registration_end=month_back, event=pushed_event)
expected_order = [normal_event, pushed_event]
with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS):
self.assertEqual(list(Event.by_registration.all()), expected_order)
|
dotKom/onlineweb4
|
apps/events/tests/all_tests.py
|
Python
|
mit
| 3,574
| 0.003637
|
from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from sentry.models import Project, Team
from sentry.web.forms.add_project import AddProjectForm
from sentry.web.frontend.base import OrganizationView
from sentry.utils.http import absolute_uri
ERR_NO_TEAMS = 'You cannot create a new project because there are no teams to assign it to.'
class AddProjectWithTeamForm(AddProjectForm):
team = forms.ChoiceField(
choices=(), required=True,
help_text='The team controls who has access to this project.',
)
class Meta:
fields = ('name', 'team')
model = Project
def __init__(self, user, team_list, *args, **kwargs):
super(AddProjectWithTeamForm, self).__init__(*args, **kwargs)
self.team_list = team_list
if len(self.team_list) == 1:
del self.fields['team']
else:
self.fields['team'].choices = (
(t.slug, t.name)
for t in team_list
)
self.fields['team'].widget.choices = self.fields['team'].choices
def clean_team(self):
value = self.cleaned_data['team']
for team in self.team_list:
if value == team.slug:
return team
return None
def save(self, actor, ip_address):
team = self.cleaned_data.get('team', self.team_list[0])
return super(AddProjectWithTeamForm, self).save(actor, team, ip_address)
class CreateProjectView(OrganizationView):
# TODO(dcramer): I'm 95% certain the access is incorrect here as it would
# be probably validating against global org access, and all we care about is
# team admin
required_scope = 'team:write'
def get_form(self, request, organization, team_list):
return AddProjectWithTeamForm(request.user, team_list, request.POST or None, initial={
'team': request.GET.get('team'),
})
def handle(self, request, organization):
team_list = [
t for t in Team.objects.get_for_user(
organization=organization,
user=request.user,
)
if request.access.has_team_scope(t, self.required_scope)
]
if not team_list:
messages.error(request, ERR_NO_TEAMS)
return self.redirect(reverse('sentry-organization-home', args=[organization.slug]))
form = self.get_form(request, organization, team_list)
if form.is_valid():
project = form.save(request.user, request.META['REMOTE_ADDR'])
return self.redirect(absolute_uri('/{}/{}/settings/install/'.format(
organization.slug,
project.slug,
)))
context = {
'form': form,
}
return self.respond('sentry/create-project.html', context)
|
nicholasserra/sentry
|
src/sentry/web/frontend/create_project.py
|
Python
|
bsd-3-clause
| 2,910
| 0.002062
|
import os
os.environ["PYSDL2_DLL_PATH"] = os.getcwd()
import sdl2
import win32gui
def get_windows_bytitle(title_text, exact = False):
"""
Gets window by title text. [Windows Only]
"""
def _window_callback(hwnd, all_windows):
all_windows.append((hwnd, win32gui.GetWindowText(hwnd)))
windows = []
win32gui.EnumWindows(_window_callback, windows)
if exact:
return [hwnd for hwnd, title in windows if title_text == title]
else:
return [hwnd for hwnd, title in windows if title_text in title]
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)
#This will return a handle to an open 'Notepad.exe' window.
window_handle = get_windows_bytitle("Untitled", False)
#Create a window so that the hint below can be set
a = sdl2.SDL_CreateWindow("test window", sdl2.SDL_WINDOWPOS_UNDEFINED,sdl2.SDL_WINDOWPOS_UNDEFINED, 200,200, 0 )
#Set hint as recommended by SDL documentation: https://wiki.libsdl.org/SDL_CreateWindowFrom#Remarks
result = sdl2.SDL_SetHint(sdl2.SDL_HINT_VIDEO_WINDOW_SHARE_PIXEL_FORMAT, hex(id(a)))
print(sdl2.SDL_GetError())
np_window = sdl2.SDL_CreateWindowFrom(window_handle[0])
print(sdl2.SDL_GetError())
np_sur = sdl2.SDL_GetWindowSurface(np_window)
print(sdl2.SDL_GetError())
save_sur = sdl2.SDL_CreateRGBSurface(0,np_sur[0].w,np_sur[0].h,32,0,0,0,0)
print(sdl2.SDL_GetError())
r = sdl2.SDL_BlitSurface(np_sur, None, save_sur, None)
print(sdl2.SDL_GetError())
result = sdl2.SDL_SaveBMP(save_sur,'test.bmp')
print(sdl2.SDL_GetError())
sdl2.SDL_FreeSurface(save_sur)
print(sdl2.SDL_GetError())
|
tpainter/df_everywhere
|
df_everywhere/test/sdltest/StackOverflow_Question_Code.py
|
Python
|
gpl-2.0
| 1,568
| 0.017857
|
#!/usr/bin/python
# Exploit toolkit using shodan module for search exploit & host lookup
# Code : by jimmyromanticdevil
#
# Download :
# Before you run this code you must install shodan lib.
# $ wget [url]http://pypi.python.org/packages/source/s/shodan/shodan-0.4.tar.gz[/url]
# $ tar xvf shodan-0.2.tar.gz
# $ cd shodan-0.2/
# $ sudo python setup.py install
# Api key request:
# See in here : [url]http://www.shodanhq.com/api_doc[/url]
# Rules of shodan :
# 1. Don't make more than 1 query per second.
# 2. Be respectful when using the API, I don't have a lot of resources to work with.
# So users might want to get their own key (have to register on shodan's website).
# Plus all the requests go through shodan servers which might make it pretty slow if many people are using the service.
#
# Special thanks :
# thanks person :5ynl0rd,kiddies aka peneter,ne0 d4rk fl00der,oghie,parc0mx,me0nkz,suryal0e,zee_eichel
# mirwan aka cassaprogy,shadow_maker,suddent_death,aip,r3d3,dawflin,n1nj4,hakz,
# leXel,s3my0n,MaXe,Andre Corleone ,Shamus,and all my friend .
# thanks communty : Tecon-crew<[url]http://tecon-crew.org[/url]>
# Void-labs <[url]http://void-labs.org[/url]>
# Makassar ethical hacker<[url]http://makassarhacker.com/>[/url]
# Intern0t <[url]http://forum.intern0t.net/>[/url]
# Deadc0de <[url]http://forum.deadc0de.or.id/>[/url]
#-----------------------------------------------
import shodan,sys,time,base64,os
from time import sleep
from shodan import WebAPI
__author__='amltbXlyb21hbnRpY2Rldmls'
__email__ ='PHJvbWFudGljZGV2aWwuamltbXlAZ21haWwuY29tPg=='
__api__ ='Z4xjUqqsaQbFgYrnn3EBuoJsSC0VZTyI'#request youre api key and paste in here
_lolz_ = WebAPI(__api__)
def tayping(title):
try:
for i in title:
print "\b%s"%i,
sys.stdout.flush()
time.sleep(0.005)
except ImportError:
print "Some Error",
def check():
try:
checking = "[C]Checking module..."
tayping(checking)
sleep(2)
import shodan
except ImportError:
error ="\n[!]You must install Shodan Module in here :\n[url]http://pypi.python.org/packages/source/s/shodan/...[/url]"
        tayping(error)
except KeyboardInterrupt:
print "\n[*]Exiting program...\n"
sys.exit(1)
else :
succes="\n[*]Shodan module is available..."
tayping(succes)
sleep(2)
try:
api_check="\n[C]Checking Api key.."
tayping(api_check)
sleep(2)
check_Api = len(__api__)
if check_Api==0:
error_api= "\n[!] Api key is not available\n[!]You must request Api key in here :[url]http://www.shodanhq.com/api_doc[/url]\n\n\n\n"
tayping(error_api)
sleep(2)
elif check_Api != 0:
succces = "\n[*]Api key is available\n\n\n\n"
tayping(succces)
sleep(3)
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
sys.exit(0)
def clear():
if sys.platform in ('linux-i386', 'linux2', 'darwin'):
SysCls = 'clear'
elif sys.platform == 'win32' or sys.platform == 'dos' or sys.platform[0:5] == 'ms-dos':
SysCls = 'cls'
else:
SysCls = 'unknown'
return SysCls
def title():
__Auth__= base64.b64decode(__author__)
__Eml__ = base64.b64decode(__email__)
title='''
//////////////////////////////////////////////////////
___________ .__ .__ __
\_ _____/___ _________ | | ____ |__|_/ |_
| __)_ \ \/ /\____ \ | | / _ \ | |\ __\\
| \ > < | |_> >| |__( <_> )| | | |
/_______ //__/\_ \| __/ |____/ \____/ |__| |__|
\/ \/|__|/
Toolkit
Coder by : %s
Contach : %s
//////////////////////////////////////////////////////
'''%(__Auth__,__Eml__)
tayping(title)
def expoitdb():
try:
searching_Exploit= raw_input('[+]Search a exploit :')
print '[!]You search [%s] Exploit'% searching_Exploit
wtf = _lolz_.exploitdb.search(searching_Exploit)
more = wtf['total']
print '[!]Found [%s] exploit with result [%s]'%(more,searching_Exploit)
try:
display =raw_input('[!]See all list exploit found?(y/n)')
if display =='y':
ds = wtf['matches']
for i in ds :
print'%s: %s' % (i['id'],i['description'])
except Exception,err:
print'[%s]'%err
try:
display_exploit=raw_input('[+]Select type exploit?(y/n)')
if display_exploit =='y':
print'choois types : remote, webapps, dos, local, shellcode ?'
rock =raw_input('')
print 'youre chooise [%s] exploits'%rock
r = wtf['matches']
for i in r:
if rock ==i['type']:
print'%s: %s' % (i['id'],i['description'])
except Exception,err:
print'[%s]'%err
try:
view_exploit=raw_input('[+]Select exploit to view ?(y/n)')
if view_exploit =='y':
print'[+]Enter exploit id to view :'
v = raw_input('')
lols=wtf['matches']
for i in lols:
if v == str(i['id']):
File_exploit = _lolz_.exploitdb.download(i['id'])
print 'Filename: %s'% File_exploit['filename']
print 'Content-type: %s' % File_exploit['content-type']
print File_exploit['data']
download_exploit= raw_input('[+]download the exploit ?(y/n)')
if download_exploit=='y':
dwnload = open(File_exploit['filename'], 'w')
dwnload.write(File_exploit['data'])
dwnload.close()
print'%s successfully download' % File_exploit['filename']
except Exception,err:
print'[%s]'%err
try_again=raw_input('[+]Do you want to try again ?(y/n):')
while try_again=='y':
os.system(clear())
title()
expoitdb()
try_again=raw_input('[+]Do you want to try again ?(y/n):')
main()
except KeyboardInterrupt, IOError:
print '\nYou pressed Ctrl+C or exited...'
main()
sys.exit(1)
def metasploit():
try:
module_search=raw_input('[!]Search module metasploit :')
print'[!]We will search metasploit module'
m_m = _lolz_.msf.search(module_search)
result = m_m['total']
print 'Modules found: %s'%result
result2 = m_m['matches']
for i in result2:
print '%s: %s' % (i['type'], i['name'])
download =raw_input('[+]Download module : (y/n)')
if download =='y':
file = _lolz_.msf.download(i['fullname'])
print 'Filename: %s' % file['filename']
print 'Content-type: %s' % file['content-type']
print file['data']
try_again = raw_input('[+]Do you want to try again ?(y/n)')
while try_again =='y':
os.system(clear())
title()
metasploit()
try_again = raw_input('[+]Do you want to try again ?(y/n)')
main()
except Exception,err:
print'[%s]'%err
def host():
try:
input_host = raw_input('[+]Input host :')
host_result = _lolz_.host(input_host)
ip =host_result['ip']
country=host_result.get('country', None)
city =host_result.get('city', None)
host_name =host_result['hostnames']
data =host_result['data']
resulting ="""
Ip addres = %s
Country = %s
City = %s
"""%(ip,country,city,)
tayping(resulting)
for i in data :
print """
Port = %s
Banner = %s"""%(i['port'],i['banner'])
try_again = raw_input('[!]try again ?(y/n)')
while try_again =='y':
host()
try_again = raw_input('[!]try again ?(y/n)')
except Exception,err:
print'[%s]'%err
main()
def exit():
teks_exit='\nExiting..\nThanks for use this tools'
tayping(teks_exit)
sleep(2)
sys.exit()
def main():
try:
os.system(clear())
title()
menu = {'1':expoitdb, '2':metasploit, '3':host, '4':exit,}
while True:
print """
Input your chooise:
1) Search exploit
2) Search Metasploit Modules
3) Host lookup
4) Exit
"""
try:
chooise = raw_input('Select you chooise: ')
except KeyboardInterrupt, IOError:
print '\nYou pressed Ctrl+C or exited...'
sys.exit(1)
else:
if chooise in menu.keys():
menu[chooise]()
os.system(clear())
title()
else:
print '\nInvalid selection'
except Exception,err:
print'[%s]'%err
if __name__=='__main__':
check()
main()
|
ArioX/tools
|
shodan.py
|
Python
|
gpl-2.0
| 9,697
| 0.028359
|
# importing wxPython library, see the reference here :
# http://www.wxpython.org/docs/api/wx-module.html
# and an excelent step by step tutorial there :
# http://zetcode.com/wxpython
import wx
from Controller import *
# main function
def main():
# each wx application must have a wx.App object
app = wx.App()
controller = Controller(title = "BLANK_PY2WX")
# entering the endless loop that catches all the events
app.MainLoop()
if __name__ == '__main__':
main()
|
jonathanlurie/BLANK_PY2WX
|
src/main.py
|
Python
|
mit
| 492
| 0.006098
|
import sys
from fabric.api import *
from fabric.contrib import *
from fabric.contrib.project import rsync_project
from defaults import fab
from config import ssh, sudoers
import {%= name %}
@task
def prepare_vm():
sudoers.setup_sudoers_on_vm()
@task(default=True)
def system():
print 'start here'
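# Hedged usage sketch (not part of this template): with Fabric 1.x these tasks
# would typically be invoked from the shell, e.g.
#
#     fab prepare_vm    # run the sudoers setup on the VM
#     fab system        # the @task(default=True) entry point of this fabfile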
|
haimich/knick-knack
|
doc/folderstructure/v3.2/ .knick-knack/python-fabric/files/setup.py
|
Python
|
mit
| 309
| 0.02589
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Menu for quickly adding waypoints when on move
#----------------------------------------------------------------------------
# Copyright 2007-2008, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from modules.base_module import RanaModule
import cairo
from time import time
from math import pi
def getModule(*args, **kwargs):
return ClickMenu(*args, **kwargs)
class ClickMenu(RanaModule):
"""Overlay info on the map"""
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
self.lastWaypoint = "(none)"
self.lastWaypointAddTime = 0
self.messageLingerTime = 2
def handleMessage(self, message, messageType, args):
if message == "addWaypoint":
m = self.m.get("waypoints", None)
if m is not None:
self.lastWaypoint = m.newWaypoint()
self.lastWaypointAddTime = time()
def drawMapOverlay(self, cr):
"""Draw an overlay on top of the map, showing various information
about position etc."""
# waypoints will be done in another way, so this is disabled for the time being
# (x,y,w,h) = self.get('viewport')
#
# dt = time() - self.lastWaypointAddTime
# if(dt > 0 and dt < self.messageLingerTime):
# self.drawNewWaypoint(cr, x+0.5*w, y+0.5*h, w*0.3)
# else:
# m = self.m.get('clickHandler', None)
# if(m != None):
# m.registerXYWH(x+0.25*w,y+0.25*h,w*0.5,h*0.5, "clickMenu:addWaypoint")
def drawNewWaypoint(self, cr, x, y, size):
text = self.lastWaypoint
cr.set_font_size(200)
extents = cr.text_extents(text)
(w, h) = (extents[2], extents[3])
cr.set_source_rgb(0, 0, 0.5)
cr.arc(x, y, size, 0, 2 * pi)
cr.fill()
x1 = x - 0.5 * w
y1 = y + 0.5 * h
border = 20
cr.set_source_rgb(1, 1, 1)
cr.move_to(x1, y1)
cr.show_text(text)
cr.fill()
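# Illustrative sketch (not part of modRana): drawNewWaypoint only needs a cairo
# context and self.lastWaypoint, so it can be exercised on an off-screen surface.
# The helper below is an assumption added purely for demonstration purposes.
def _render_waypoint_preview(path="waypoint_preview.png"):
    # draw the "new waypoint" bubble into a standalone PNG
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 800, 800)
    ctx = cairo.Context(surface)
    menu = ClickMenu.__new__(ClickMenu)  # skip RanaModule.__init__ for the demo
    menu.lastWaypoint = "WP1"
    menu.drawNewWaypoint(ctx, 400, 400, 300)
    surface.write_to_png(path)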
|
ryfx/modrana
|
modules/_mod_clickMenu.py
|
Python
|
gpl-3.0
| 2,761
| 0.002898
|
from __future__ import unicode_literals
import copy
import sys
from functools import update_wrapper
from django.utils.six.moves import zip
import django.db.models.manager # Imported to register signal handler.
from django.conf import settings
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db import (router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.text import get_text_list, capfirst
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
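# Illustrative sketch (SomeModel is hypothetical, not defined here): because the
# generated exception carries a __reduce__ that points back to the attribute on
# the owning class, instances survive pickling, e.g.
#
#     err = SomeModel.DoesNotExist("not found")
#     restored = pickle.loads(pickle.dumps(err))
#     restored.args == ("not found",)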
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# six.with_metaclass() inserts an extra class called 'NewBase' in the
# inheritance tree: Model -> NewBase -> object. But the initialization
# should be executed only once for a given model class.
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
return super_new(cls, name, bases, attrs)
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase) and
not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist', subclass_exception(str('DoesNotExist'),
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,),
module, attached_to=new_class))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception(str('MultipleObjectsReturned'),
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,),
module, attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in o2o_map:
field = o2o_map[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '\
'with field of similar name from '\
'abstract base class %r' % \
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers:
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
signals.class_prepared.send(sender=cls)
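# Illustrative sketch (the models are hypothetical): when a model declares
# order_with_respect_to, _prepare() curries method_get_order / method_set_order
# onto the related model as get_<lowercased model>_order / set_<...>_order:
#
#     class Question(models.Model):
#         text = models.CharField(max_length=200)
#
#     class Answer(models.Model):
#         question = models.ForeignKey(Question)
#         class Meta:
#             order_with_respect_to = 'question'
#
#     question.get_answer_order()           # ordered list of Answer pks
#     question.set_answer_order([3, 1, 2])  # rewrites the hidden _order column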
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the zip calls matters - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in list(kwargs):
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
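# Illustrative sketch (Person is hypothetical): the constructor above accepts
# either positional values, matched to fields in declaration order (the fast
# path used by queryset iteration), or keyword values with defaults filled in:
#
#     Person(1, "Ada")        # pk and name set positionally
#     Person(name="Ada")      # remaining fields fall back to their defaults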
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if not six.PY3 and hasattr(self, '__unicode__'):
if type(self).__unicode__ == Model.__str__:
klass_name = type(self).__name__
raise RuntimeError("%s.__unicode__ is aliased to __str__. Did"
" you apply @python_2_unicode_compatible"
" without defining __str__?" % klass_name)
return force_text(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
if not self._deferred:
return super(Model, self).__reduce__()
data = self.__dict__
defers = []
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
model = self._meta.proxy_for_model
return (model_unpickle, (model, defers), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and self._deferred and using == self._state.db:
field_names = set()
for field in self._meta.fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
deferred_fields = [
f.attname for f in self._meta.fields
if f.attname not in self.__dict__
and isinstance(self.__class__.__dict__[f.attname],
DeferredAttribute)]
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw', 'cls', and 'origin').
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
if cls is None:
cls = self.__class__
meta = cls._meta
if not meta.proxy:
origin = cls
else:
meta = cls._meta
if origin and not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
# We also go through this process to defer the save of proxy objects
# to their actual underlying model.
if not raw or meta.proxy:
if meta.proxy:
org = cls
else:
org = None
for parent, field in meta.parents.items():
# At this point, parent's primary key field may be unknown
# (for example, from administration form which doesn't fill
# this field). If so, fill it.
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self.save_base(cls=parent, origin=org, using=using,
update_fields=update_fields)
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy, we
# set attname directly, bypassing the descriptor.
# Invalidate the related object cache, in case it's been
# accidentally populated. A fresh instance will be
# re-built from the database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
if meta.proxy:
return
if not meta.proxy:
non_pks = [f for f in meta.local_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
record_exists = True
manager = cls._base_manager
if pk_set:
# Determine if we should do an update (pk already exists, forced update,
# no force_insert)
if ((force_update or update_fields) or (not force_insert and
manager.using(using).filter(pk=pk_val).exists())):
if force_update or non_pks:
values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
if values:
rows = manager.using(using).filter(pk=pk_val)._update(values)
if force_update and not rows:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not rows:
raise DatabaseError("Save with update_fields did not affect any rows.")
else:
record_exists = False
if not pk_set or not record_exists:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
fields = meta.local_fields
if not pk_set:
if force_update or update_fields:
raise ValueError("Cannot force an update in save() with no primary key.")
fields = [f for f in fields if not isinstance(f, AutoField)]
record_exists = False
update_pk = bool(meta.has_auto_field and not pk_set)
result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed(using=using)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if origin and not meta.auto_created:
signals.post_save.send(sender=origin, instance=self, created=(not record_exists),
update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = is_next and 'gt' or 'lt'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
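# Illustrative sketch (the model is hypothetical): a declaration such as
#
#     class Meta:
#         unique_together = (('event', 'seat'),)
#
# produces one combined entry in unique_checks; a violation of it is later
# reported under NON_FIELD_ERRORS rather than under a single field.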
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup; we can remove this special
# case if that makes its way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field, unique_for):
opts = self._meta
return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)),
'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
'lookup': lookup_type,
}
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
model_name = capfirst(opts.verbose_name)
# A unique field
if len(unique_check) == 1:
field_name = unique_check[0]
field = opts.get_field(field_name)
field_label = capfirst(field.verbose_name)
# Insert the error into the error dict, very sneaky
return field.error_messages['unique'] % {
'model_name': six.text_type(model_name),
'field_label': six.text_type(field_label)
}
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
field_labels = get_text_list(field_labels, _('and'))
return _("%(model_name)s with this %(field_label)s already exists.") % {
'model_name': six.text_type(model_name),
'field_label': six.text_type(field_labels)
}
def full_clean(self, exclude=None):
"""
Calls clean_fields, clean, and validate_unique on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing message_dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in validators.EMPTY_VALUES:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.messages
if errors:
raise ValidationError(errors)
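# Illustrative usage sketch (Article is hypothetical): full_clean() aggregates
# the errors of clean_fields(), clean() and validate_unique(), which is why
# ModelForm validation typically calls it before save() issues any SQL:
#
#     article = Article(title="", slug="duplicate-slug")
#     try:
#         article.full_clean()
#     except ValidationError as e:
#         print(e.message_dict)   # e.g. {'title': [...], 'slug': [...]}
#     else:
#         article.save()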
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
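# Illustrative sketch: a per-model override in settings.py replaces the model's
# own get_absolute_url; keys are lowercase "app_label.model_name" strings:
#
#     ABSOLUTE_URL_OVERRIDES = {
#         'blog.entry': lambda obj: "/entries/%s/" % obj.slug,
#     }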
########
# MISC #
########
class Empty(object):
pass
def model_unpickle(model, attrs):
"""
Used to unpickle Model subclasses with deferred fields.
"""
cls = deferred_class_factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
|
havard024/prego
|
venv/lib/python2.7/site-packages/django/db/models/base.py
|
Python
|
mit
| 44,041
| 0.002021
|
# -*- coding: utf-8 -*-
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# Plugin to read the MountainsMap surface format (sur)
#The current state supports reading surface-format files when they contain an
#Attolight hyperspectral map, but does not support writing, nor other
#MountainsMap files (.pro etc.). Tests still need to be written, and it remains
#to be checked whether the comments can be systematically parsed into metadata
#and whether support for original_metadata or similar should be added.
import logging
#Dateutil allows parsing dates, but I don't think it's useful here
#import dateutil.parser
import numpy as np
#Commented for now because I don't know what purpose it serves
#import traits.api as t
from copy import deepcopy
import struct
import sys
import zlib
import os
import warnings
#Maybe later we can implement reading the class with the io utils tools instead
#of re-defining read functions in the class
#import hyperspy.misc.io.utils_readfile as iou
#This module will prove useful when we write the export function
#import hyperspy.misc.io.tools
#DictionaryTreeBrowser class handles the fancy metadata dictionaries
#from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.exceptions import MountainsMapFileError
_logger = logging.getLogger(__name__)
# Plugin characteristics
# ----------------------
format_name = 'Digital Surf Surface'
description = """Read data from the proprietary .sur file format from Digital
Surf. Allows hyperspy to interact with the mountains map software"""
full_support = False #Check with the boys once this is done
# Recognised file extension
file_extensions = ('sur', 'SUR','pro','PRO')
default_extension = 0
# Writing features
writes = False #First we will check with the load
non_uniform_axis = False
# ----------------------
class DigitalSurfHandler(object):
""" Class to read Digital Surf MountainsMap files.
Attributes
----------
filename, signal_dict, _work_dict, _list_sur_file_content, _Object_type,
_N_data_object, _N_data_channels, _initialized
Methods
-------
parse_file, parse_header, get_image_dictionaries
Class Variables
---------------
_object_type : dict key: int containing the mountainsmap object types
"""
#Object types
_mountains_object_types = {
-1: "_ERROR" ,
0: "_UNKNOWN" ,
1: "_PROFILE" ,
2: "_SURFACE" ,
3: "_BINARYIMAGE" ,
4: "_PROFILESERIE" ,
5: "_SURFACESERIE" ,
6: "_MERIDIANDISC" ,
7: "_MULTILAYERPROFILE" ,
8: "_MULTILAYERSURFACE" ,
9: "_PARALLELDISC" ,
10: "_INTENSITYIMAGE" ,
11: "_INTENSITYSURFACE" ,
12: "_RGBIMAGE" ,
13: "_RGBSURFACE" ,
14: "_FORCECURVE" ,
15: "_SERIEOFFORCECURVE" ,
16: "_RGBINTENSITYSURFACE",
20: "_SPECTRUM" ,
21: "_HYPCARD" ,
}
def __init__(self, filename=None):
#We do not need to check for file existence here because
#io module implements it in the load function
self.filename = filename
#The signal_dict dictionary has to be returned by the
#file_reader function. Apparently original_metadata needs
#to be set
self.signal_dict = {'data': np.empty((0,0,0)),
'axes': [],
'metadata': {},
'original_metadata': {}
}
#Dictionary used to store, read and write the fields of the binary file
#format defined in the MountainsMap SDK. Structure is
# _work_dict['Field']['value'] : access field value
# _work_dict['Field']['b_unpack_fn'](f) : unpack value from file f
# _work_dict['Field']['b_pack_fn'](f,v): pack value v in file f
#(see the usage sketch right after the dictionary below)
self._work_dict = \
{
"_01_Signature":
{
'value':"DSCOMPRESSED",
'b_unpack_fn': lambda f: self._get_str(f,12,"DSCOMPRESSED"),
'b_pack_fn': lambda f,v: self._set_str(f,v,12),
},
"_02_Format":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_03_Number_of_Objects":
{
'value':1,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_04_Version":
{
'value':1,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_05_Object_Type":
{
'value':2,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_06_Object_Name":
{
'value':"",
'b_unpack_fn': lambda f: self._get_str(f,30,"DOSONLY"),
'b_pack_fn': lambda f,v: self._set_str(f,v,30),
},
"_07_Operator_Name":
{
'value':"",
'b_unpack_fn': lambda f: self._get_str(f,30,""),
'b_pack_fn': lambda f,v: self._set_str(f,v,30),
},
"_08_P_Size":
{
'value':1,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_09_Acquisition_Type":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_10_Range_Type":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_11_Special_Points":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_12_Absolute":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_13_Gauge_Resolution":
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_14_W_Size":
{
'value':0,
'b_unpack_fn': self._get_int32,
'b_pack_fn': self._set_int32,
},
"_15_Size_of_Points":
{
'value':16,
'b_unpack_fn':lambda f: self._get_int16(f,32),
'b_pack_fn': self._set_int16,
},
"_16_Zmin":
{
'value':0,
'b_unpack_fn':self._get_int32,
'b_pack_fn':self._set_int32,
},
"_17_Zmax":
{
'value':0,
'b_unpack_fn':self._get_int32,
'b_pack_fn': self._set_int32,
},
"_18_Number_of_Points":
{
'value':0,
'b_unpack_fn': self._get_int32,
'b_pack_fn': self._set_int32,
},
"_19_Number_of_Lines":
{
'value':0,
'b_unpack_fn':self._get_int32,
'b_pack_fn':self._set_int32,
},
"_20_Total_Nb_of_Pts":
{
'value':0,
'b_unpack_fn': self._get_int32,
'b_pack_fn': self._set_int32
},
"_21_X_Spacing":
{
'value':1.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_22_Y_Spacing":
{
'value':1.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_23_Z_Spacing":
{
'value':1.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_24_Name_of_X_Axis":
{
'value':'X',
'b_unpack_fn': lambda f: self._get_str(f,16,"X"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_25_Name_of_Y_Axis":
{
'value':'Y',
'b_unpack_fn': lambda f: self._get_str(f,16,"Y"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_26_Name_of_Z_Axis":
{
'value':'Z',
'b_unpack_fn': lambda f: self._get_str(f,16,"Z"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_27_X_Step_Unit":
{
'value':'um',
'b_unpack_fn': lambda f: self._get_str(f,16,"um"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_28_Y_Step_Unit":
{
'value':'um',
'b_unpack_fn': lambda f: self._get_str(f,16,"um"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_29_Z_Step_Unit":
{
'value':'um',
'b_unpack_fn': lambda f: self._get_str(f,16,"um"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_30_X_Length_Unit":
{
'value':'um',
'b_unpack_fn': lambda f: self._get_str(f,16,"um"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_31_Y_Length_Unit":
{
'value':'um',
'b_unpack_fn': lambda f: self._get_str(f,16,"um"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_32_Z_Length_Unit":
{
'value':'um',
'b_unpack_fn': lambda f: self._get_str(f,16,"um"),
'b_pack_fn': lambda f,v: self._set_str(f,v,16),
},
"_33_X_Unit_Ratio":
{
'value':1.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_34_Y_Unit_Ratio":
{
'value':1.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_35_Z_Unit_Ratio":
{
'value':1.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_36_Imprint":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_37_Inverted":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_38_Levelled":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_39_Obsolete":
{
'value':0,
'b_unpack_fn': lambda f: self._get_bytes(f,12),
'b_pack_fn': lambda f,v: self._set_bytes(f,v,12),
},
"_40_Seconds":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_41_Minutes":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_42_Hours":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_43_Day":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_44_Month":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_45_Year":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_46_Day_of_week":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_47_Measurement_duration":
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_48_Compressed_data_size":
{
'value':0,
'b_unpack_fn':self._get_uint32,
'b_pack_fn':self._set_uint32,
},
"_49_Obsolete":
{
'value':0,
'b_unpack_fn': lambda f: self._get_bytes(f,6),
'b_pack_fn': lambda f,v: self._set_bytes(f,v,6),
},
"_50_Comment_size":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_51_Private_size":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_52_Client_zone":
{
'value':0,
'b_unpack_fn': lambda f: self._get_bytes(f,128),
'b_pack_fn': lambda f,v: self._set_bytes(f,v,128),
},
"_53_X_Offset":
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_54_Y_Offset":
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_55_Z_Offset":
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_56_T_Spacing":\
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_57_T_Offset":
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_58_T_Axis_Name":
{
'value':'T',
'b_unpack_fn': lambda f: self._get_str(f,13,"Wavelength"),
'b_pack_fn': lambda f,v: self._set_str(f,v,13),
},
"_59_T_Step_Unit":
{
'value':'um',
'b_unpack_fn': lambda f: self._get_str(f,13,"nm"),
'b_pack_fn': lambda f,v: self._set_str(f,v,13),
},
"_60_Comment":
{
'value':0,
'b_unpack_fn': self._unpack_comment,
'b_pack_fn': self._pack_comment,
},
"_61_Private_zone":
{
'value':0,
'b_unpack_fn': self._unpack_private,
'b_pack_fn': self._pack_private,
},
"_62_points":
{
'value':0,
'b_unpack_fn': self._unpack_data,
'b_pack_fn': lambda f,v: 0, #Not implemented
},
}
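#A sketch of how a single entry of the dictionary above is used when reading
#from an open binary file handle f (names below are only illustrative):
#
#    fmt = self._work_dict["_02_Format"]
#    fmt["value"] = fmt["b_unpack_fn"](f)       # unpack the field and store it
#    fmt["b_pack_fn"](out_file, fmt["value"])   # pack it back into out_file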
#List of all measurements
self._list_sur_file_content = []
#The surface file convention is that when multiple data objects are
#saved at once, they are all packed into the same binary file.
#Every single object contains a full header with all the sections,
#but only the first one contains the relevant information about the
#object type, the number of objects in the file, and so on.
#Hence these are stored as attributes.
#Object type
self._Object_type = "_UNKNOWN"
#Number of data objects in the file.
self._N_data_object = 1
self._N_data_channels = 1
### Read methods
def _read_sur_file(self):
"""Read the binary, possibly compressed, content of the surface
file. Surface files can be encoded as single or a succession
of objects. The file is thus read iteratively and from metadata of the
first file """
with open(self.filename,'rb') as f:
#We read the first object
self._read_single_sur_object(f)
#We append the first object to the content list
self._append_work_dict_to_content()
#Lookup how many objects are stored in the file and save
self._N_data_object = self._get_work_dict_key_value("_03_Number_of_Objects")
self._N_data_channels = self._get_work_dict_key_value('_08_P_Size')
#Determine how many objects we need to read
if self._N_data_channels>0 and self._N_data_object>0:
N_objects_to_read = self._N_data_channels*self._N_data_object
elif self._N_data_channels>0:
N_objects_to_read = self._N_data_channels
elif self._N_data_object>0:
N_objects_to_read = self._N_data_object
else:
N_objects_to_read = 1
#Lookup what object type we are dealing with and save
self._Object_type = \
DigitalSurfHandler._mountains_object_types[ \
self._get_work_dict_key_value("_05_Object_Type")]
#if more than 1
if N_objects_to_read > 1:
#continue reading until everything is done
for i in range(1,N_objects_to_read):
#We read an object
self._read_single_sur_object(f)
#We append it to content list
self._append_work_dict_to_content()
def _read_single_sur_object(self,file):
for key,val in self._work_dict.items():
self._work_dict[key]['value'] = val['b_unpack_fn'](file)
def _append_work_dict_to_content(self):
"""Save the values stored in the work dict in the surface file list"""
datadict = deepcopy( \
{key:val['value'] for key,val in self._work_dict.items()})
self._list_sur_file_content.append(datadict)
def _get_work_dict_key_value(self,key):
return self._work_dict[key]['value']
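#Hedged usage sketch (the file name is a placeholder): reading a .sur file and
#building the hyperspy signal dictionary with this class looks roughly like:
#
#    handler = DigitalSurfHandler("map.sur")
#    handler._read_sur_file()
#    signal_dict = handler._build_sur_dict()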
### Signal dictionary methods
def _build_sur_dict(self):
"""Create a signal dict with an unpacked object"""
#If the signal is of the type spectrum or hypercard
if self._Object_type in ["_HYPCARD",]:
self._build_hyperspectral_map()
elif self._Object_type in ["_SPECTRUM"]:
self._build_spectrum()
elif self._Object_type in ["_PROFILE"]:
self._build_general_1D_data()
elif self._Object_type in ["_PROFILESERIE"]:
self._build_1D_series()
elif self._Object_type in ["_SURFACE"]:
self._build_surface()
elif self._Object_type in ["_SURFACESERIE"]:
self._build_surface_series()
elif self._Object_type in ["_MULTILAYERSURFACE"]:
self._build_surface_series()
elif self._Object_type in ["_RGBSURFACE"]:
self._build_RGB_surface()
elif self._Object_type in ["_RGBIMAGE"]:
self._build_RGB_image()
elif self._Object_type in ["_RGBINTENSITYSURFACE"]:
self._build_RGB_surface()
elif self._Object_type in ["_BINARYIMAGE"]:
self._build_surface()
else:
raise MountainsMapFileError(self._Object_type \
+ "is not a supported mountain object.")
return self.signal_dict
def _build_Xax(self,unpacked_dict,ind=0,nav=False,binned=False):
"""Return X axis dictionary from an unpacked dict. index int and navigate
boolean can be optionally passed. Default 0 and False respectively."""
Xax = { 'name': unpacked_dict['_24_Name_of_X_Axis'],
'size': unpacked_dict['_18_Number_of_Points'],
'index_in_array': ind,
'scale': unpacked_dict['_21_X_Spacing'],
'offset': unpacked_dict['_53_X_Offset'],
'units': unpacked_dict['_27_X_Step_Unit'],
'navigate':nav,
'is_binned':binned,
}
return Xax
def _build_Yax(self,unpacked_dict,ind=1,nav=False,binned=False):
"""Return X axis dictionary from an unpacked dict. index int and navigate
boolean can be optionally passed. Default 1 and False respectively."""
Yax = { 'name': unpacked_dict['_25_Name_of_Y_Axis'],
'size': unpacked_dict['_19_Number_of_Lines'],
'index_in_array': ind,
'scale': unpacked_dict['_22_Y_Spacing'],
'offset': unpacked_dict['_54_Y_Offset'],
'units': unpacked_dict['_28_Y_Step_Unit'],
'navigate':nav,
'is_binned':binned,
}
return Yax
def _build_Tax(self,unpacked_dict,size_key,ind=0,nav=True,binned=False):
"""Return T axis dictionary from an unpacked surface object dict.
Unlike x and y axes, the size key can be determined from various keys:
_14_W_Size, _15_Size_of_Points or _03_Number_of_Objects. index int
and navigate boolean can be optionally passed. Default 0 and
True respectively."""
#The T axis is somewhat special because it is only defined on series
#and is thus navigation-only. It is only defined on the first object
#in a series.
#Here it needs to be checked that the T axis scale is not 0; otherwise
#hyperspy raises errors.
scale = unpacked_dict['_56_T_Spacing']
if scale == 0:
scale =1
Tax = { 'name': unpacked_dict['_58_T_Axis_Name'],
'size': unpacked_dict[size_key],
'index_in_array': ind,
'scale': scale,
'offset': unpacked_dict['_57_T_Offset'],
'units': unpacked_dict['_59_T_Step_Unit'],
'navigate':nav,
'is_binned':binned,
}
return Tax
### Build methods for individual surface objects
def _build_hyperspectral_map(self,):
"""Build a hyperspectral map. Hyperspectral maps are single-object
files with datapoints of _14_W_Size length"""
#Check that the file contained only one object.
#Probably overkill at this point but better safe than sorry
if len(self._list_sur_file_content) != 1:
raise MountainsMapFileError("Input {:s} File is not of Hyperspectral type".format(self._Object_type))
#We get the dictionary with all the data
hypdic = self._list_sur_file_content[0]
#Add all the axes to the signal dict
self.signal_dict['axes'].append(\
self._build_Yax(hypdic,ind=0,nav=True))
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=1,nav=True))
#Wavelength axis in hyperspectral surface files are stored as T Axis
#with length set as _14_W_Size
self.signal_dict['axes'].append(\
self._build_Tax(hypdic,'_14_W_Size',ind=2,nav=False))
#We reshape the data in the correct format
self.signal_dict['data'] = hypdic['_62_points'].reshape(\
hypdic['_19_Number_of_Lines'],
hypdic['_18_Number_of_Points'],
hypdic['_14_W_Size'],
)
self.signal_dict['metadata'] = self._build_metadata(hypdic)
self.signal_dict['original_metadata'] = self._build_original_metadata()
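#Worked example (numbers are illustrative only): a map of 10 lines x 20 points
#with a 512-channel spectrum per position stores 10*20*512 values in
#'_62_points'; the reshape above produces the (Y, X, wavelength) cube:
#
#    flat = hypdic['_62_points']          # length 10*20*512
#    cube = flat.reshape(10, 20, 512)     # (Number_of_Lines, Number_of_Points, W_Size)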
def _build_general_1D_data(self,):
"""Build general 1D Data objects. Currently work with spectra"""
#Check that the file contained only one object.
#Probably overkill at this point but better safe than sorry
if len(self._list_sur_file_content) != 1:
raise MountainsMapFileError("Corrupt file")
#We get the dictionary with all the data
hypdic = self._list_sur_file_content[0]
#Add the axis to the signal dict
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=0,nav=False))
#We reshape the data in the correct format
self.signal_dict['data'] = hypdic['_62_points']
#Build the metadata
self._set_metadata_and_original_metadata(hypdic)
def _build_spectrum(self,):
"""Build spectra objects. Spectra and 1D series of spectra are
saved in the same object."""
#We get the dictionary with all the data
hypdic = self._list_sur_file_content[0]
#Add the signal axis to the signal dict
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=1,nav=False))
#If there is more than 1 spectrum also add the navigation axis
if hypdic['_19_Number_of_Lines'] != 1:
self.signal_dict['axes'].append(\
self._build_Yax(hypdic,ind=0,nav=True))
#We reshape the data in the correct format.
#Edit: the data is now squeezed for unneeded dimensions
self.signal_dict['data'] = np.squeeze(hypdic['_62_points'].reshape(\
hypdic['_19_Number_of_Lines'],
hypdic['_18_Number_of_Points'],
))
self._set_metadata_and_original_metadata(hypdic)
def _build_1D_series(self,):
"""Build a series of 1D objects. The T axis is navigation and set from
the first object"""
#First object dictionary
hypdic = self._list_sur_file_content[0]
#Metadata are set from first dictionary
self._set_metadata_and_original_metadata(hypdic)
#Add the series-axis to the signal dict
self.signal_dict['axes'].append(\
self._build_Tax(hypdic,'_03_Number_of_Objects',ind=0,nav=True))
#All objects must share the same signal axis
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=1,nav=False))
#We put all the data together
data = []
for obj in self._list_sur_file_content:
data.append(obj['_62_points'])
self.signal_dict['data'] = np.stack(data)
def _build_surface(self,):
"""Build a surface"""
#Check that the file contained only one object.
#Probably overkill at this point but better safe than sorry
if len(self._list_sur_file_content) != 1:
raise MountainsMapFileError("CORRUPT {:s} FILE".format(self._Object_type))
#We get the dictionary with all the data
hypdic = self._list_sur_file_content[0]
#Add all the axes to the signal dict
self.signal_dict['axes'].append(\
self._build_Yax(hypdic,ind=0,nav=False))
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=1,nav=False))
#We reshape the data in the correct format
shape = (hypdic['_19_Number_of_Lines'],hypdic['_18_Number_of_Points'])
self.signal_dict['data'] = hypdic['_62_points'].reshape(shape)
self._set_metadata_and_original_metadata(hypdic)
def _build_surface_series(self,):
"""Build a series of surfaces. The T axis is navigation and set from
the first object"""
#First object dictionary
hypdic = self._list_sur_file_content[0]
#Metadata are set from first dictionary
self._set_metadata_and_original_metadata(hypdic)
#Add the series-axis to the signal dict
self.signal_dict['axes'].append(\
self._build_Tax(hypdic,'_03_Number_of_Objects',ind=0,nav=True))
#All objects must share the same signal axes
self.signal_dict['axes'].append(\
self._build_Yax(hypdic,ind=1,nav=False))
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=2,nav=False))
#shape of the surfaces in the series
shape = (hypdic['_19_Number_of_Lines'],hypdic['_18_Number_of_Points'])
#We put all the data together
data = []
for obj in self._list_sur_file_content:
data.append(obj['_62_points'].reshape(shape))
self.signal_dict['data'] = np.stack(data)
def _build_RGB_surface(self,):
"""Build a series of surfaces. The T axis is navigation and set from
P Size"""
#First object dictionary
hypdic = self._list_sur_file_content[0]
#Metadata are set from first dictionary
self._set_metadata_and_original_metadata(hypdic)
#Add the series-axis to the signal dict
self.signal_dict['axes'].append(\
self._build_Tax(hypdic,'_08_P_Size',ind=0,nav=True))
#All objects must share the same signal axes
self.signal_dict['axes'].append(\
self._build_Yax(hypdic,ind=1,nav=False))
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=2,nav=False))
#shape of the surfaces in the series
shape = (hypdic['_19_Number_of_Lines'],hypdic['_18_Number_of_Points'])
#We put all the data together
data = []
for obj in self._list_sur_file_content:
data.append(obj['_62_points'].reshape(shape))
#Pushing data into the dictionary
self.signal_dict['data'] = np.stack(data)
def _build_RGB_image(self,):
"""Build an RGB image. The T axis is navigation and set from
P Size"""
#First object dictionary
hypdic = self._list_sur_file_content[0]
#Metadata are set from first dictionary
self._set_metadata_and_original_metadata(hypdic)
#Add the series-axis to the signal dict
self.signal_dict['axes'].append(\
self._build_Tax(hypdic,'_08_P_Size',ind=0,nav=True))
#All objects must share the same signal axes
self.signal_dict['axes'].append(\
self._build_Yax(hypdic,ind=1,nav=False))
self.signal_dict['axes'].append(\
self._build_Xax(hypdic,ind=2,nav=False))
#shape of the surfaces in the series
shape = (hypdic['_19_Number_of_Lines'],hypdic['_18_Number_of_Points'])
#We put all the data together
data = []
for obj in self._list_sur_file_content:
data.append(obj['_62_points'].reshape(shape))
#Pushing data into the dictionary
self.signal_dict['data'] = np.stack(data)
self.signal_dict.update({'post_process':[self.post_process_RGB]})
### Metadata utility methods
def _build_metadata(self,unpacked_dict):
"""Return a minimalistic metadata dictionary according to hyperspy
format. Accepts as input the dictionary of unpacked header values from a
mountainsmap object.
Parameters
----------
unpacked_dict: dictionary from the header of a surface file
Returns
-------
metadict: dictionary in the hyperspy metadata format
"""
#Formatting for complicated strings. We add parentheses to units
qty_unit = unpacked_dict['_29_Z_Step_Unit']
#We strip unit from any character that might pre-format it
qty_unit = qty_unit.strip(' \t\n()[]')
#If unit string is still truthy after strip we add parentheses
if qty_unit:
qty_unit = "({:s})".format(qty_unit)
quantity_str = " ".join([
unpacked_dict['_26_Name_of_Z_Axis'],qty_unit]).strip()
#Date and time are set in metadata only if all values are not set to 0
date = [unpacked_dict['_45_Year'],
unpacked_dict['_44_Month'],
unpacked_dict['_43_Day']]
if not all(v == 0 for v in date):
date_str = "{:4d}-{:2d}-{:2d}".format(date[0],date[1],date[2])
else:
date_str = ""
time = [unpacked_dict['_42_Hours'],
unpacked_dict['_41_Minutes'],
unpacked_dict['_40_Seconds']]
if not all(v == 0 for v in time):
time_str = "{:d}:{:d}:{:d}".format(time[0],time[1],time[2])
else:
time_str = ""
#Metadata dictionary initialization
metadict = {
"General":{
"authors": unpacked_dict['_07_Operator_Name'],
"date":date_str,
"original_filename": os.path.split(self.filename)[1],
"time": time_str,
},
"Signal": {
"quantity": quantity_str,
"signal_type": "",
},
}
return metadict
def _build_original_metadata(self,):
"""Builds a metadata dictionnary from the header"""
original_metadata_dict = {}
Ntot = (self._N_data_object+1)*(self._N_data_channels+1)
#Iteration over Number of data objects
for i in range(self._N_data_object):
#Iteration over the Number of Data channels
for j in range(self._N_data_channels):
#Creating a dictionary key for each object
k = (i+1)*(j+1)
key = "Object_{:d}_Channel_{:d}".format(i,j)
original_metadata_dict.update({key:{}})
#We load one full object header
a = self._list_sur_file_content[k-1]
#Save it as original metadata dictionary
headerdict = {"H"+l.lstrip('_'):a[l] for l in a if l not in \
("_62_points",'_61_Private_zone')}
original_metadata_dict[key].update({"Header" : headerdict})
#The second dictionary might contain custom mountainsmap params
parsedict = {}
#Check if it is the case and append it to
#original metadata if yes
valid_comment = self._check_comments(a["_60_Comment"],'$','=')
if valid_comment:
parsedict = self._MS_parse(a["_60_Comment"],'$','=')
parsedict = {l.lstrip('_'):m for l,m in parsedict.items()}
original_metadata_dict[key].update({"Parsed" : parsedict})
return original_metadata_dict
def _set_metadata_and_original_metadata(self,unpacked_dict):
"""Run successively _build_metadata and _build_original_metadata
and set signal dictionary with results"""
self.signal_dict['metadata'] = self._build_metadata(unpacked_dict)
self.signal_dict['original_metadata'] = self._build_original_metadata()
def _check_comments(self,commentsstr,prefix,delimiter):
"""Check if comment string is parsable into metadata dictionary.
Some specific lines (empty or starting with @@) will be ignored,
but any non-ignored line must either be a title line (beginning with the
TITLESTART indicator) or be parsable (starting with the prefix and
containing the key/value delimiter). The comment is considered parsable
if it contains at least one parsable line and no non-ignored line that is
neither a title line nor parsable.
Parameters
----------
commentsstr: string containing comments
prefix: string (or char) character assumed to start each line.
'$' if a .sur file.
delimiter: string that delimits the keyword from value. always '='
Returns
-------
valid: boolean
"""
#Titlestart markers start with Prefix ($) followed by underscore
TITLESTART = '{:s}_'.format(prefix)
#We start by assuming that the comment string is valid
#but contains 0 valid (= parsable) lines
valid = True
N_valid_lines = 0
for line in commentsstr.splitlines():
#Here we ignore any empty line or line starting with @@
ignore = False
if not line.strip() or line.startswith('@@'):
ignore = True
#If the line must not be ignored
if not ignore:
#If the line starts with a titlestart marker it counts as valid
if line.startswith(TITLESTART):
N_valid_lines += 1
# if it does not we check that it has the delimiter and
# starts with prefix
else:
#We check that line contains delimiter and prefix
#if it does the count of valid line is increased
if delimiter in line and line.startswith(prefix):
N_valid_lines += 1
#Otherwise the whole comment string is thrown out
else:
valid = False
#finally, if the total number of valid lines is 0 we throw out the comments
if N_valid_lines == 0:
valid = False
#return whether the comment string is considered parsable
return valid
def _MS_parse(self, strMS, prefix, delimiter):
""" Parses a string containing metadata information. The string can be
read from the comment section of a .sur file, or, alternatively, a file
containing them with a similar formatting.
Parameters
----------
strMS: string containing metadata
prefix: string (or char) character assumed to start each line.
'$' if a .sur file.
delimiter: string that delimits the keyword from value. always '='
Returns
-------
dictMS: dictionary in the correct hyperspy metadata format
"""
#dictMS is created as an empty dictionary
dictMS = {}
#Title lines start with the prefix followed by an underscore
TITLESTART = '{:s}_'.format(prefix)
for line in strMS.splitlines() :
#Here we ignore any empty line or line starting with @@
ignore = False
if not line.strip() or line.startswith('@@'):
ignore = True
#If the line must not be ignored
if not ignore:
if line.startswith(TITLESTART):
#We strip keys from whitespace at the end and beginning
keyMain = line[len(TITLESTART):].strip()
dictMS[keyMain] = {}
elif line.startswith(prefix):
key, *liValue = line.split(delimiter)
#Key is also stripped from beginning or end whitespace
key = key[len(prefix):].strip()
strValue = liValue[0] if len(liValue)>0 else ""
# remove whitespace at the beginning of value
strValue = strValue.strip()
liValue = strValue.split(' ')
try :
if key == "Grating":
dictMS[keyMain][key] = liValue[0] # we don't want to eval this one
else :
dictMS[keyMain][key] = eval(liValue[0])
except :
dictMS[keyMain][key] = liValue[0]
if len(liValue) > 1:
dictMS[keyMain][key+'_units'] = liValue[1]
return dictMS
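# A hypothetical illustration of the comment format handled by _check_comments
# and _MS_parse (the example string is made up, not from a real .sur file):
# '$_' opens a section, plain '$' lines hold 'key = value [unit]' pairs, and
# '@@' lines or blank lines are ignored.
#
#   comment = "$_Acquisition\n$Exposure = 0.5 s\n@@ ignored"
#   self._check_comments(comment, '$', '=')   # -> True
#   self._MS_parse(comment, '$', '=')
#   # -> {'Acquisition': {'Exposure': 0.5, 'Exposure_units': 's'}}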
### Post processing
def post_process_RGB(self,signal):
signal = signal.transpose()
max_data = np.nanmax(signal.data)
if max_data <= 256:
signal.change_dtype('uint8')
signal.change_dtype('rgb8')
elif max_data <= 65536:
signal.change_dtype('uint16')
signal.change_dtype('rgb16')
else:
warnings.warn("""RGB-announced data could not be converted to
uint8 or uint16 datatype""")
pass
return signal
### pack/unpack binary quantities
def _get_int16(self,file, default=None, signed=True):
"""Read a 16-bits int with a user-definable default value if
no file is given"""
if file is None :
return default
b = file.read(2)
if sys.byteorder == 'big' :
return struct.unpack('>h', b)[0]
else :
return struct.unpack('<h', b)[0]
def _set_int16(self, file, val):
file.write(struct.pack('<h', val))
def _get_str(self, file, size, default=None, encoding='latin-1'):
"""Read a str of defined size in bytes with a user-definable default
value if no file is given"""
if file is None :
return default
read_str = file.read(size).decode(encoding)
return read_str.strip(' \t\n')
def _set_str(self, file, val, size, encoding='latin-1'):
"""Write a str of defined size in bytes to a file. struct.pack
will automatically trim the string if it is too long"""
file.write(struct.pack('<{:d}s'.format(size),
'{{:<{:d}s}}'.format(size).format(val).encode(encoding)))
def _get_int32(self,file, default=None):
"""Read a 32-bits int with a user-definable default value if no
file is given"""
if file is None :
return default
b = file.read(4)
if sys.byteorder == 'big' :
return struct.unpack('>i', b)[0]
else :
return struct.unpack('<i', b)[0]
def _set_int32(self, file, val):
"""Write a 32-bits int in a file f """
file.write(struct.pack('<i', val))
def _get_float(self,file,default=None):
"""Read a 4-bytes (single precision) float from a binary file f with a
default value if no file is given"""
if file is None:
return default
return struct.unpack('<f', file.read(4))[0]
def _set_float(self, file, val):
"""write a 4-bytes (single precision) float in a file"""
file.write(struct.pack('<f', val))
def _get_uint32(self, file, default=None):
if file is None :
return default
b = file.read(4)
if sys.byteorder == 'big' :
return struct.unpack('>I', b)[0]
else :
return struct.unpack('<I', b)[0]
def _set_uint32(self, file, val):
file.write(struct.pack('<I', val))
def _get_bytes(self, file, size, default=None):
if file is None:
return default
else:
return file.read(size)
def _set_bytes(self, file, val, size):
file.write(struct.pack('<{:d}s'.format(size), val))
def _unpack_comment(self,file,encoding='latin-1'):
commentsize = self._get_work_dict_key_value("_50_Comment_size")
return self._get_str(file,commentsize,encoding=encoding)
def _pack_comment(self,file,val,encoding='latin-1'):
commentsize = self._get_work_dict_key_value("_50_Comment_size")
self._set_str(file,val,commentsize)
def _unpack_private(self,file,encoding='latin-1'):
privatesize = self._get_work_dict_key_value("_51_Private_size")
return self._get_str(file,privatesize,encoding=encoding)
def _pack_private(self,file,val,encoding='latin-1'):
privatesize = self._get_work_dict_key_value("_51_Private_size")
self._set_str(file,val,privatesize)
def _unpack_data(self,file,encoding='latin-1'):
"""This needs to be special because it reads until the end of
file. This causes an error in the series of data"""
#Size of datapoints in bytes. Always int16 (==2) or 32 (==4)
Psize = int(self._get_work_dict_key_value('_15_Size_of_Points')/8)
dtype = np.int16 if Psize == 2 else np.int32
if self._get_work_dict_key_value('_01_Signature') != 'DSCOMPRESSED' :
#If the points are not compressed we need to read the exact
#size occupied by datapoints
#Datapoints in X and Y dimensions
Npts_tot = self._get_work_dict_key_value('_20_Total_Nb_of_Pts')
#Datasize in WL
Wsize = self._get_work_dict_key_value('_14_W_Size')
#We need to take into account the fact that Wsize is often
#set to 0 instead of 1 in non-spectral data to compute the
#space occupied by data in the file
readsize = Npts_tot*Psize
if Wsize != 0:
readsize*=Wsize
#if Npts_channel is not 0:
# readsize*=Npts_channel
#Read the exact size of the data
_points = np.frombuffer(file.read(readsize),dtype=dtype)
#_points = np.fromstring(file.read(readsize),dtype=dtype)
else:
#If the points are compressed do the uncompress magic. There
#the space occupied by datapoints is self-taken care of.
#Number of streams
_directoryCount = self._get_uint32(file)
#empty lists to store the read sizes
rawLengthData = []
zipLengthData = []
for i in range(_directoryCount):
#Size of raw and compressed data sizes in each stream
rawLengthData.append(self._get_uint32(file))
zipLengthData.append(self._get_uint32(file))
#We now initialize an empty binary string to store the results
rawData = b''
for i in range(_directoryCount):
#And for each stream we uncompress using zip lib
#and add it to raw string
rawData += zlib.decompress(file.read(zipLengthData[i]))
#Finally numpy converts it to a numeric object
_points = np.frombuffer(rawData, dtype=dtype)
#_points = np.fromstring(rawData, dtype=dtype)
# rescale data
#We set non measured points to nan according to .sur ways
nm = []
if self._get_work_dict_key_value("_11_Special_Points") == 1 :
# has unmeasured points
nm = _points == self._get_work_dict_key_value("_16_Zmin")-2
#We set the point in the numeric scale
_points = _points.astype(float) \
* self._get_work_dict_key_value("_23_Z_Spacing") \
* self._get_work_dict_key_value("_35_Z_Unit_Ratio") \
+ self._get_work_dict_key_value("_55_Z_Offset")
_points[nm] = np.nan
#Return the points, rescaled
return _points
def _pack_data(self,file,val,encoding='latin-1'):
"""This needs to be special because it writes until the end of
file."""
datasize = self._get_work_dict_key_value("_62_points")
self._set_str(file,val,datasize)
def file_reader(filename,**kwds):
"""Read a mountainsmap .sur file and return a dictionnary containing the
information necessary for creating the data object
Parameters
----------
filename: name of the .sur file to be read
Returns
-------
signal_dict: dictionary in the appropriate format. The dictionary can
contain several keys including 'data', 'axes', 'metadata', 'original_metadata',
'post_process', 'mapping', 'attributes'.
"""
ds = DigitalSurfHandler(filename)
ds._read_sur_file()
surdict = ds._build_sur_dict()
return [surdict,]
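# A minimal usage sketch for this reader (the file name is illustrative):
#
#   dicts = file_reader("example_measurement.sur")
#   signal_dict = dicts[0]
#   data = signal_dict['data']        # numpy array with the unpacked points
#   axes = signal_dict['axes']        # list of axis description dictionaries
#   metadata = signal_dict['metadata']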
|
ericpre/hyperspy
|
hyperspy/io_plugins/sur.py
|
Python
|
gpl-3.0
| 49,142
| 0.01691
|
from .base_executor import ScriptExecutor
from judgeenv import env
class RubyExecutor(ScriptExecutor):
ext = '.rb'
name = 'RUBY'
address_grace = 65536
fs = ['.*\.(?:so|rb$)', '/etc/localtime$', '/dev/urandom$', '/proc/self', '/usr/lib/ruby/gems/']
test_program = 'puts gets'
@classmethod
def get_command(cls):
return env['runtime'].get(cls.name.lower())
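# Hedged note: get_command looks up the runtime path for this executor by its
# lowercased name, i.e. env['runtime'].get('ruby'), so the judge configuration
# is assumed to map 'ruby' to the interpreter binary.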
|
buhe/judge
|
executors/ruby.py
|
Python
|
agpl-3.0
| 393
| 0.005089
|
import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from cfme.common import Taggable
from cfme.common import TagPageView
from cfme.containers.provider import ContainerObjectAllBaseView
from cfme.containers.provider import ContainerObjectDetailsBaseView
from cfme.containers.provider import GetRandomInstancesMixin
from cfme.containers.provider import Labelable
from cfme.containers.provider import LoggingableView
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.providers import get_crud_by_name
class ServiceView(ContainerObjectAllBaseView, LoggingableView):
"""Container Nodes view"""
@property
def in_service(self):
"""Determine if the Service page is currently open"""
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Containers', 'Container Services']
)
class ServiceAllView(ServiceView):
"""Container Services All view"""
SUMMARY_TEXT = "Container Services"
@property
def is_displayed(self):
return self.in_service and super().is_displayed
class ServiceDetailsView(ContainerObjectDetailsBaseView):
"""Container Services Details view"""
SUMMARY_TEXT = "Container Services"
@attr.s
class Service(BaseEntity, Taggable, Labelable):
PLURAL = 'Container Services'
all_view = ServiceAllView
details_view = ServiceDetailsView
name = attr.ib()
project_name = attr.ib()
provider = attr.ib()
@attr.s
class ServiceCollection(GetRandomInstancesMixin, BaseCollection):
"""Collection object for :py:class:`Service`."""
ENTITY = Service
def all(self):
# container_services table has ems_id, join with ext_management_systems on id for provider name
# Then join with container_projects on the id for the project
service_table = self.appliance.db.client['container_services']
ems_table = self.appliance.db.client['ext_management_systems']
project_table = self.appliance.db.client['container_projects']
service_query = (
self.appliance.db.client.session
.query(service_table.name, project_table.name, ems_table.name)
.join(ems_table, service_table.ems_id == ems_table.id)
.join(project_table, service_table.container_project_id == project_table.id))
provider = None
# filtered
if self.filters.get('provider'):
provider = self.filters.get('provider')
service_query = service_query.filter(ems_table.name == provider.name)
services = []
for name, project_name, ems_name in service_query.all():
services.append(self.instantiate(name=name, project_name=project_name,
provider=provider or get_crud_by_name(ems_name)))
return services
@navigator.register(ServiceCollection, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
VIEW = ServiceAllView
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Compute', 'Containers', 'Container Services')
def resetter(self, *args, **kwargs):
# Reset view and selection
self.view.toolbar.view_selector.select("List View")
self.view.paginator.reset_selection()
@navigator.register(Service, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToAttribute('parent', 'All')
VIEW = ServiceDetailsView
def step(self, *args, **kwargs):
search_visible = self.prerequisite_view.entities.search.is_displayed
self.prerequisite_view.entities.get_entity(name=self.obj.name,
project_name=self.obj.project_name,
surf_pages=not search_visible,
use_search=search_visible).click()
@navigator.register(Service, 'EditTags')
class EditTags(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
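# A hedged usage sketch for the collection above (how the collection is
# obtained is an assumption; it is normally reached through an appliance's
# collection registry, and the registry name shown here is illustrative):
#
#   services = appliance.collections.container_services   # assumed registry name
#   for svc in services.all():   # reads container_services / ext_management_systems tables
#       print(svc.name, svc.project_name, svc.provider)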
|
nachandr/cfme_tests
|
cfme/containers/service.py
|
Python
|
gpl-2.0
| 4,417
| 0.002264
|
import sys
import gzip
import logging
from csv import DictReader
from datetime import datetime
from . import app, connect_db
from ..constants import DEFAULT_GENOME_BUILD, BENIGN, UNCERTAIN, UNKNOWN, PATHOGENIC
from ..extensions import mongo
from ..backend import build_variant_doc, get_variant_category, update_variant_task, create_variant_task, run_variant_tasks
from ..services.notifier import UpdateNotifier
logging.basicConfig(format="%(levelname)s (%(name)s %(lineno)s): %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def iter_variants(filename):
with gzip.open(filename, 'rt') as ifp:
for row in DictReader(ifp, dialect='excel-tab'):
yield row
def did_variant_category_change(old_doc, new_doc):
old_category = get_variant_category(old_doc)
new_category = get_variant_category(new_doc)
return old_category != new_category
def iter_variant_updates(db, variants):
for variant in variants:
new_doc = build_variant_doc(DEFAULT_GENOME_BUILD, **variant)
doc_id = new_doc['_id']
old_doc = db.variants.find_one({ '_id': doc_id })
if did_variant_category_change(old_doc, new_doc):
yield (old_doc, new_doc)
def main(clinvar_filename):
db = connect_db()
notifier = UpdateNotifier(db, app.config)
started_at = datetime.utcnow()
task_list = []
variant_iterator = iter_variants(clinvar_filename)
for i, (old_doc, new_doc) in enumerate(iter_variant_updates(db, variant_iterator)):
if i % 10000 == 0:
logger.debug('Processed {} variants'.format(i))
if old_doc:
# Variant is already known, either:
# - someone subscribed before it was added to clinvar, or
# - it was already in clinvar, and we might have new annotations
task = update_variant_task(db, old_doc, new_doc)
else:
# Add clinvar annotations with empty subscriber data
task = create_variant_task(db, new_doc)
task_list.append(task)
results = run_variant_tasks(db, task_list, notifier=notifier)
logger.debug('Variants updated. Results: {}'.format(results))
db.updates.insert_one({
'started_at': started_at,
'finished_at': datetime.utcnow(),
'inserted_count': results['inserted'],
'modified_count': results['modified'],
'notified_count': results['notified'],
})
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='Update ClinVar data')
parser.add_argument('clinvar_filename', metavar='CLINVAR_ALLELES_TSV_GZ', type=str,
help='clinvar_alleles.single.b*.tsv.gz from github.com/macarthur-lab/clinvar pipeline')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args.clinvar_filename)
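# Hedged usage note: the script takes a single positional argument, the gzipped
# clinvar_alleles.single.b*.tsv.gz file produced by the macarthur-lab/clinvar
# pipeline. Invocation shown schematically (exact entry point may differ):
#
#   python -m vss.scripts.import clinvar_alleles.single.b37.tsv.gz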
|
buske/variant-subscription-service
|
vss/scripts/import.py
|
Python
|
gpl-3.0
| 2,871
| 0.002438
|
"""
Lateral distribution functions that can be used for simulating particle
densities and for fitting to data.
Example usage::
>>> import tables
>>> from sapphire import NkgLdfSimulation, ScienceParkCluster
>>> data = tables.open_file('/tmp/test_ldf_simulation.h5', 'w')
>>> cluster = ScienceParkCluster()
>>> sim = NkgLdfSimulation(max_core_distance=400, min_energy=1e15,
... max_energy=1e21, cluster=cluster,
... datafile=data, n=200)
>>> sim.run()
"""
import warnings
from numpy import arctan2, cos, log10, pi, random, sin, sqrt
from scipy.special import gamma
from ..utils import pbar, vector_length
from .detector import ErrorlessSimulation, HiSPARCSimulation
class BaseLdfSimulation(HiSPARCSimulation):
def __init__(self, max_core_distance, min_energy, max_energy, *args,
**kwargs):
"""Simulation initialization
:param max_core_distance: maximum distance of shower core to
center of cluster (in meters).
:param min_energy,max_energy: Minimum and maximum energy of the
shower (in eV).
"""
super(BaseLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = BaseLdf()
self.max_core_distance = max_core_distance
self.min_energy = min_energy
self.max_energy = max_energy
# The cluster is not moved, so detector positions can be stored.
for station in self.cluster.stations:
for detector in station.detectors:
detector.xy_coordinates = detector.get_xy_coordinates()
def generate_shower_parameters(self):
"""Generate shower parameters, i.e. core position
For the simple LDF only the core position is relevant. It
assumes the shower to come from the Zenith.
:return: dictionary with shower parameters: core_pos
(x, y-tuple).
"""
r = self.max_core_distance
giga = int(1e9)
for i in pbar(range(self.n), show=self.progress):
energy = self.generate_energy(self.min_energy, self.max_energy)
size = 10 ** (log10(energy) - 15 + 4.8)
shower_parameters = {'ext_timestamp': (giga + i) * giga,
'azimuth': self.generate_azimuth(),
'zenith': 0.,
'core_pos': self.generate_core_position(r),
'size': size,
'energy': energy}
yield shower_parameters
def simulate_detector_response(self, detector, shower_parameters):
"""Simulate detector response to a shower
Get the mips in a detector from the LDF.
:param detector: :class:`~sapphire.clusters.Detector` for which
the observables will be determined.
:param shower_parameters: dictionary with the shower parameters.
"""
n_detected = self.get_num_particles_in_detector(detector,
shower_parameters)
theta = shower_parameters['zenith']
if n_detected:
mips = self.simulate_detector_mips(n_detected, theta)
observables = {'n': mips}
else:
observables = {'n': 0.}
return observables
def get_num_particles_in_detector(self, detector, shower_parameters):
"""Get the number of particles in a detector
:param detector: :class:`~sapphire.clusters.Detector` for which
the number of particles will be determined.
:param shower_parameters: dictionary with the shower parameters.
"""
x, y = detector.xy_coordinates
core_x, core_y = shower_parameters['core_pos']
zenith = shower_parameters['zenith']
azimuth = shower_parameters['azimuth']
size = shower_parameters['size']
r = self.ldf.calculate_core_distance(x, y, core_x, core_y, zenith,
azimuth)
p_shower = self.ldf.calculate_ldf_value(r, n_electrons=size)
p_ground = p_shower * cos(zenith)
num_particles = self.simulate_particles_for_density(
p_ground * detector.get_area())
return num_particles
@staticmethod
def simulate_particles_for_density(p):
"""Get number of particles in detector given a particle density
:param p: particle density in number per detector area.
:return: random number from Poisson distribution.
"""
return random.poisson(p)
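# A small worked illustration of the numeric steps in BaseLdfSimulation above
# (values illustrative, arithmetic only): a shower with energy 1e15 eV gets
# size = 10 ** (log10(1e15) - 15 + 4.8) = 10 ** 4.8 ~ 6.3e4 electrons, and a
# detector that sees a local density p (in m**-2) over an area A (in m**2)
# reports random.poisson(p * A) detected particles.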
class BaseLdfSimulationWithoutErrors(ErrorlessSimulation, BaseLdfSimulation):
"""This simulation does not simulate errors/uncertainties
This should result in perfect particle counting for the detectors.
"""
@staticmethod
def simulate_particles_for_density(p):
"""Exact number"""
return p
class NkgLdfSimulation(BaseLdfSimulation):
"""Same as the BaseLdfSimulation but uses the NkgLdf as LDF"""
def __init__(self, *args, **kwargs):
super(NkgLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = NkgLdf()
class NkgLdfSimulationWithoutErrors(NkgLdfSimulation,
BaseLdfSimulationWithoutErrors):
"""Same as the NkgLdfSimulation but without error simulation"""
pass
class KascadeLdfSimulation(BaseLdfSimulation):
"""Same as the BaseLdfSimulation but uses the KascadeLdf as LDF"""
def __init__(self, *args, **kwargs):
super(KascadeLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = KascadeLdf()
class KascadeLdfSimulationWithoutErrors(KascadeLdfSimulation,
BaseLdfSimulationWithoutErrors):
"""Same as the KascadeLdfSimulation but without error simulation"""
pass
class EllipsLdfSimulation(BaseLdfSimulation):
"""Same as BaseLdfSimulation but uses the EllipsLdF as LDF"""
def __init__(self, *args, **kwargs):
super(EllipsLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = EllipsLdf()
def generate_shower_parameters(self):
"""Generate shower parameters, i.e. core position
For the elliptic LDF both the core position and the zenith angle
are relevant.
:return: dictionary with shower parameters: core_pos
(x, y-tuple).
"""
r = self.max_core_distance
giga = int(1e9)
for i in pbar(range(self.n), show=self.progress):
energy = self.generate_energy(self.min_energy, self.max_energy)
size = 10 ** (log10(energy) - 15 + 4.8)
shower_parameters = {'ext_timestamp': (giga + i) * giga,
'azimuth': self.generate_azimuth(),
'zenith': self.generate_zenith(),
'core_pos': self.generate_core_position(r),
'size': size,
'energy': energy}
yield shower_parameters
def get_num_particles_in_detector(self, detector, shower_parameters):
"""Get the number of particles in a detector
:param detector: :class:`~sapphire.clusters.Detector` for which
the number of particles will be determined.
:param shower_parameters: dictionary with the shower parameters.
"""
x, y = detector.xy_coordinates
core_x, core_y = shower_parameters['core_pos']
zenith = shower_parameters['zenith']
azimuth = shower_parameters['azimuth']
size = shower_parameters['size']
r, phi = self.ldf.calculate_core_distance_and_angle(x, y, core_x,
core_y)
p_ground = self.ldf.calculate_ldf_value(r, phi, size, zenith, azimuth)
num_particles = self.simulate_particles_for_density(
p_ground * detector.get_area())
return num_particles
class BaseLdf(object):
"""Base LDF class
No particles! Always returns a particle density of 0.
"""
def calculate_ldf_value(self, r, n_electrons=None, s=None):
return 0.
def calculate_core_distance(self, x, y, x0, y0, theta, phi):
"""Calculate core distance
The core distance is the distance of the detector to the shower core,
measured *on the shower front*. For derivations, see logbook.
:param x,y: detector position in m.
:param x0,y0: shower core position in m.
:param theta,phi: shower axis direction in radians.
:return: distance from detector to the shower core in shower
front plane in m.
"""
x = x - x0
y = y - y0
return sqrt(x ** 2 + y ** 2 -
(x * cos(phi) + y * sin(phi)) ** 2 * sin(theta) ** 2)
class NkgLdf(BaseLdf):
"""The Nishimura-Kamata-Greisen function"""
# shower parameters
# Age parameter and Moliere radius from Thoudam2012 sec 5.6.
_n_electrons = 10 ** 4.8
_s = 1.7
_r0 = 30.
def __init__(self, n_electrons=None, s=None):
"""NKG LDF setup
:param n_electrons: Shower size (number of electrons).
:param s: Shower age parameter.
"""
if n_electrons is not None:
self._n_electrons = n_electrons
if s is not None:
self._s = s
self._cache_c_s_value()
def _cache_c_s_value(self):
"""Store the c_s value
The c_s value does not change if s and r0 are fixed.
"""
self._c_s = self._c(self._s)
def calculate_ldf_value(self, r, n_electrons=None, s=None):
"""Calculate the LDF value
:param r: core distance in m.
:param n_electrons: number of electrons in the shower.
:param s: shower age parameter.
:return: particle density in m ** -2.
"""
if n_electrons is None:
n_electrons = self._n_electrons
if s is None:
s = self._s
return self.ldf_value(r, n_electrons, s)
def ldf_value(self, r, n_electrons, s):
"""Calculate the LDF value
Given a core distance, shower size, and shower age.
As given in Fokkema2012 eq 7.2.
:param r: core distance in m.
:param n_electrons: number of electrons in the shower.
:param s: shower age parameter.
:return: particle density in m ** -2.
"""
if s == self._s:
c_s = self._c_s
else:
c_s = self._c(s)
r0 = self._r0
return (n_electrons * c_s * (r / r0) ** (s - 2) *
(1 + r / r0) ** (s - 4.5))
def _c(self, s):
"""Part of the LDF
As given in Fokkema2012 eq 7.3.
:param s: shower age parameter.
:return: c(s)
"""
r0 = self._r0
return (gamma(4.5 - s) /
(2 * pi * r0 ** 2 * gamma(s) * gamma(4.5 - 2 * s)))
class KascadeLdf(NkgLdf):
"""The KASCADE modified NKG function"""
# shower parameters
# Values from Fokkema2012 sec 7.1.
_n_electrons = 10 ** 4.8
_s = 0.94 # Shape parameter
_r0 = 40.
_alpha = 1.5
_beta = 3.6
def ldf_value(self, r, n_electrons, s):
"""Calculate the LDF value
Given a core distance, shower size, and shower age.
As given in Fokkema2012 eq 7.4.
:param r: core distance in m.
:param n_electrons: number of electrons in the shower.
:param s: shower shape parameter.
:return: particle density in m ** -2.
"""
if s == self._s:
c_s = self._c_s
else:
c_s = self._c(s)
r0 = self._r0
alpha = self._alpha
beta = self._beta
return (n_electrons * c_s * (r / r0) ** (s - alpha) *
(1 + r / r0) ** (s - beta))
def _c(self, s):
"""Part of the LDF
As given in Fokkema2012 eq 7.5.
:param s: shower shape parameter.
:return: c(s)
"""
r0 = self._r0
beta = self._beta
alpha = self._alpha
return (gamma(beta - s) /
(2 * pi * r0 ** 2 * gamma(s - alpha + 2) *
gamma(alpha + beta - 2 * s - 2)))
class EllipsLdf(KascadeLdf):
"""The NKG function modified for leptons and azimuthal asymmetry"""
# shower parameters
# Values from Montanus, paper to follow.
_n_electrons = 10 ** 4.8
_s1 = -.5 # Shape parameter
_s2 = -2.6 # Shape parameter
_r0 = 30.
_zenith = 0.
_azimuth = 0.
def __init__(self, n_electrons=None, zenith=None, azimuth=None, s1=None,
s2=None):
if n_electrons is not None:
self._n_electrons = n_electrons
if zenith is not None:
self._zenith = zenith
if azimuth is not None:
self._azimuth = azimuth
if s1 is not None:
self._s1 = s1
if s2 is not None:
self._s2 = s2
self._cache_c_s_value()
def _cache_c_s_value(self):
"""Store the c_s value
The c_s value does not change if s1, s2 and r0 are fixed.
"""
self._c_s = self._c(self._s1, self._s2)
def calculate_ldf_value(self, r, phi, n_electrons=None, zenith=None,
azimuth=None):
"""Calculate the LDF value for a given core distance and polar angle
:param r: core distance in m.
:param phi: polar angle in rad.
:param n_electrons: number of electrons in the shower.
:return: particle density in m ** -2.
"""
if n_electrons is None:
n_electrons = self._n_electrons
if zenith is None:
zenith = self._zenith
if azimuth is None:
azimuth = self._azimuth
return self.ldf_value(r, phi, n_electrons, zenith, azimuth, self._s1,
self._s2)
def ldf_value(self, r, phi, n_electrons, zenith, azimuth, s1, s2):
"""Calculate the LDF value
Given a core distance, core polar angle, zenith angle, azimuth angle,
shower size and three shape parameters (r0, s1, s2) .
As given by Montanus, paper to follow.
.. warning::
The value 11.24 in the muoncorr expression is only valid
for s1 = -0.5, s2 = -2.6 and r0 = 30.
:param r: core distance in m.
:param phi: polar angle in rad.
:param n_electrons: number of electrons in the shower.
:param zenith: zenith angle in rad.
:param azimuth: azimuth angle in rad.
:param s1: shower shape parameter.
:param s2: shower shape parameter.
:return: particle density in m ** -2.
"""
if s1 == self._s1 and s2 == self._s2:
c_s = self._c_s
else:
c_s = self._c(s1, s2)
r0 = self._r0
relcos = cos(phi - azimuth)
ell = sqrt(1 - sin(zenith) * sin(zenith) * relcos * relcos)
shift = -0.0575 * sin(2 * zenith) * r * relcos
k = shift + r * ell
term1 = k / r0
term2 = 1 + k / r0
muoncorr = 1 + k / (11.24 * r0) # See warning in docstring.
with warnings.catch_warnings(record=True):
p = (n_electrons * c_s * cos(zenith) * term1 ** s1 * term2 ** s2 *
muoncorr)
return p
def _c(self, s1, s2):
"""Normalization of the LDF
As given in Montanus, paper to follow.
:param s1: shower shape parameter.
:param s2: shower shape parameter.
:return: c(s1,s2)
"""
r0 = self._r0
return (gamma(-s2) /
(2 * pi * r0 ** 2 * gamma(s1 + 2) * gamma(-s1 - s2 - 2)))
def calculate_core_distance_and_angle(self, x, y, x0, y0):
"""Calculate core distance
The core distance is the distance of the detector to the shower core,
measured *in the horizontal observation plane*.
:param x,y: detector position in m.
:param x0,y0: shower core position in m.
:return: distance and polar angle from detector to the shower core in
horizontal observation plane in m resp. rad.
"""
x = x - x0
y = y - y0
return vector_length(x, y), arctan2(y, x)
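# A minimal usage sketch of the LDF classes defined above (values are
# illustrative):
#
#   ldf = NkgLdf(n_electrons=10 ** 4.8, s=1.7)
#   r = ldf.calculate_core_distance(x=5., y=3., x0=0., y0=0., theta=0., phi=0.)
#   density = ldf.calculate_ldf_value(r)   # particles per m**2 at core distance r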
|
HiSPARC/sapphire
|
sapphire/simulations/ldf.py
|
Python
|
gpl-3.0
| 16,510
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2017 University of Oxford
#
# This file is part of msprime.
#
# msprime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# msprime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with msprime. If not, see <http://www.gnu.org/licenses/>.
#
"""
Module responsible for generating and reading tree files.
"""
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import sys
import base64
try:
import numpy as np
_numpy_imported = True
except ImportError:
_numpy_imported = False
import _msprime
import msprime.drawing as drawing
import msprime.provenance as provenance
import msprime.tables as tables
from _msprime import NODE_IS_SAMPLE
NULL_NODE = -1
NULL_POPULATION = -1
NULL_MUTATION = -1
IS_PY2 = sys.version_info[0] < 3
def check_numpy():
if not _numpy_imported:
raise RuntimeError("numpy is required for this operation.")
CoalescenceRecord = collections.namedtuple(
"CoalescenceRecord",
["left", "right", "node", "children", "time", "population"])
# TODO this interface is rubbish. Should have much better printing options.
# TODO we should probably use __slots__ here.
class SimpleContainer(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return repr(self.__dict__)
class Node(SimpleContainer):
"""
A :ref:`node <sec_node_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this node. Varies from 0 to
:attr:`.TreeSequence.num_nodes` - 1.
:vartype id: int
:ivar flags: The bitwise flags for this node.
:vartype flags: int
:ivar time: The birth time of the individual represented by this node.
:vartype time: float
:ivar population: The integer ID of the population that this node was born in.
:vartype population: int
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this node.
:vartype metadata: bytes
"""
def __init__(
self, id_=None, flags=0, time=0, population=NULL_POPULATION, metadata=""):
self.id = id_
self.time = time
self.population = population
self.metadata = metadata
self.flags = flags
def is_sample(self):
"""
Returns True if this node is a sample. This value is derived from the
``flag`` variable.
:rtype: bool
"""
return self.flags & NODE_IS_SAMPLE
class Edge(SimpleContainer):
"""
An :ref:`edge <sec_edge_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar left: The left coordinate of this edge.
:vartype left: float
:ivar right: The right coordinate of this edge.
:vartype right: float
:ivar parent: The integer ID of the parent node for this edge.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype parent: int
:ivar child: The integer ID of the child node for this edge.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype child: int
"""
def __init__(self, left, right, parent, child):
self.left = left
self.right = right
self.parent = parent
self.child = child
def __repr__(self):
return "{{left={:.3f}, right={:.3f}, parent={}, child={}}}".format(
self.left, self.right, self.parent, self.child)
class Site(SimpleContainer):
"""
A :ref:`site <sec_site_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this site. Varies from 0 to
:attr:`.TreeSequence.num_sites` - 1.
:vartype id: int
:ivar position: The floating point location of this site in genome coordinates.
Ranges from 0 (inclusive) to :attr:`.TreeSequence.sequence_length`
(exclusive).
:vartype position: float
:ivar ancestral_state: The ancestral state at this site (i.e., the state
inherited by nodes, unless mutations occur).
:vartype ancestral_state: str
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this site.
:vartype metadata: bytes
:ivar mutations: The list of mutations at this site. Mutations
within a site are returned in the order they are specified in the
underlying :class:`.MutationTable`.
:vartype mutations: list[:class:`.Mutation`]
"""
def __init__(self, id_, position, ancestral_state, mutations, metadata):
self.id = id_
self.position = position
self.ancestral_state = ancestral_state
self.mutations = mutations
self.metadata = metadata
class Mutation(SimpleContainer):
"""
A :ref:`mutation <sec_mutation_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this mutation. Varies from 0 to
:attr:`.TreeSequence.num_mutations` - 1.
:vartype id: int
:ivar site: The integer ID of the site that this mutation occurs at. To obtain
further information about a site with a given ID use
:meth:`.TreeSequence.site`.
:vartype site: int
:ivar node: The integer ID of the first node that inherits this mutation.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype node: int
:ivar derived_state: The derived state for this mutation. This is the state
inherited by nodes in the subtree rooted at this mutation's node, unless
another mutation occurs.
:vartype derived_state: str
:ivar parent: The integer ID of this mutation's parent mutation. When multiple
mutations occur at a site along a path in the tree, mutations must
record the mutation that is immediately above them. If the mutation does
not have a parent, this is equal to the :const:`NULL_MUTATION` (-1).
To obtain further information about a mutation with a given ID, use
:meth:`.TreeSequence.mutation`.
:vartype parent: int
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this site.
:vartype metadata: bytes
"""
def __init__(self, id_, site, node, derived_state, parent, metadata):
self.id = id_
self.site = site
self.node = node
self.derived_state = derived_state
self.parent = parent
self.metadata = metadata
class Migration(SimpleContainer):
"""
A :ref:`migration <sec_migration_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar left: The left end of the genomic interval covered by this
migration (inclusive).
:vartype left: float
:ivar right: The right end of the genomic interval covered by this migration
(exclusive).
:vartype right: float
:ivar node: The integer ID of the node involved in this migration event.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype node: int
:ivar source: The source population ID.
:vartype source: int
:ivar dest: The destination population ID.
:vartype dest: int
:ivar time: The time at which this migration occurred.
:vartype time: float
"""
def __init__(self, left, right, node, source, dest, time):
self.left = left
self.right = right
self.node = node
self.source = source
self.dest = dest
self.time = time
class Variant(SimpleContainer):
"""
A variant represents the observed variation among the samples
for a given site. A variant consists of (a) a reference to the
:class:`.Site` instance in question; (b) the **alleles** that may be
observed at the samples for this site; and (c) the **genotypes**
mapping sample IDs to the observed alleles.
Each element in the ``alleles`` tuple is a string, representing the
actual observed state for a given sample. The first element of this
tuple is guaranteed to be the same as the site's ``ancestral_state`` value.
The list of alleles is also guaranteed not to contain any duplicates.
However, allelic values may be listed that are not referred to by any
samples. For example, if we have a site that is fixed for the derived state
(i.e., we have a mutation over the tree root), all genotypes will be 1, but
the alleles list will be equal to ``('0', '1')``. Other than the
ancestral state being the first allele, the alleles are listed in
no particular order, and the ordering should not be relied upon.
The ``genotypes`` represent the observed allelic states for each sample,
such that ``var.alleles[var.genotypes[j]]`` gives the string allele
for sample ID ``j``. Thus, the elements of the genotypes array are
indexes into the ``alleles`` list. The genotypes are provided in this
way via a numpy array to enable efficient calculations.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar site: The site object for this variant.
:vartype site: :class:`.Site`
:ivar alleles: A tuple of the allelic values that may be observed at the
samples at the current site. The first element of this tuple is always
the site's ancestral state.
:vartype alleles: tuple(str)
:ivar genotypes: An array of indexes into the list ``alleles``, giving the
state of each sample at the current site.
:vartype genotypes: numpy.ndarray
"""
def __init__(self, site, alleles, genotypes):
self.site = site
self.alleles = alleles
self.genotypes = genotypes
# Deprecated aliases to avoid breaking existing code.
self.position = site.position
self.index = site.id
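# A hedged illustration of the alleles/genotypes mapping documented above
# (values are made up): for a variant with alleles ('0', '1') and genotypes
# array([0, 1, 1]), sample 0 carries the ancestral state '0', and
# var.alleles[var.genotypes[2]] evaluates to '1', the allele observed for
# sample 2.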
class Edgeset(SimpleContainer):
def __init__(self, left, right, parent, children):
self.left = left
self.right = right
self.parent = parent
self.children = children
def __repr__(self):
return "{{left={:.3f}, right={:.3f}, parent={}, children={}}}".format(
self.left, self.right, self.parent, self.children)
class Provenance(SimpleContainer):
def __init__(self, id_=None, timestamp=None, record=None):
self.id = id_
self.timestamp = timestamp
self.record = record
def add_deprecated_mutation_attrs(site, mutation):
"""
Add in attributes for the older deprecated way of defining
mutations. These attributes will be removed in future releases
and are deliberately undocumented in version 0.5.0.
"""
mutation.position = site.position
mutation.index = site.id
return mutation
class SparseTree(object):
"""
A SparseTree is a single tree in a :class:`.TreeSequence`. The SparseTree
implementation differs from most tree implementations by using **integer
node IDs** to refer to nodes rather than objects. Thus, when we wish to
find the parent of the node with ID '0', we use ``tree.parent(0)``, which
returns another integer. If '0' does not have a parent in the current tree
(e.g., if it is a root), then the special value :const:`.NULL_NODE`
(:math:`-1`) is returned. The children of a node are found using the
:meth:`.children` method. To obtain information about a particular node,
one may either use ``tree.tree_sequence.node(u)`` to obtain the
corresponding :class:`Node` instance, or use the :meth:`.time` or
:meth:`.population` shorthands. Tree traversals in various orders
are possible using the :meth:`.SparseTree.nodes` iterator.
Sparse trees are not intended to be instantiated directly, and are
obtained as part of a :class:`.TreeSequence` using the
:meth:`.trees` method.
"""
def __init__(self, ll_sparse_tree, tree_sequence):
self._ll_sparse_tree = ll_sparse_tree
self._tree_sequence = tree_sequence
@property
def tree_sequence(self):
"""
Returns the tree sequence that this tree is from.
:return: The parent tree sequence for this tree.
:rtype: :class:`.TreeSequence`
"""
return self._tree_sequence
def get_branch_length(self, u):
# Deprecated alias for branch_length
return self.branch_length(u)
def branch_length(self, u):
"""
Returns the length of the branch (in generations) joining the
specified node to its parent. This is equivalent to
>>> tree.time(tree.parent(u)) - tree.time(u)
Note that this is not related to the value returned by
:attr:`.length`, which describes the length of the interval
covered by the tree in genomic coordinates.
:param int u: The node of interest.
:return: The branch length from u to its parent.
:rtype: float
"""
return self.time(self.get_parent(u)) - self.time(u)
def get_total_branch_length(self):
# Deprecated alias for total_branch_length
return self.total_branch_length
@property
def total_branch_length(self):
"""
Returns the sum of all the branch lengths in this tree (in
units of generations). This is equivalent to
>>> sum(
>>> tree.branch_length(u) for u in tree.nodes()
>>> if u not in self.roots)
:return: The sum of all the branch lengths in this tree.
:rtype: float
"""
return sum(
self.get_branch_length(u) for u in self.nodes() if u not in self.roots)
def get_mrca(self, u, v):
# Deprecated alias for mrca
return self.mrca(u, v)
def mrca(self, u, v):
"""
Returns the most recent common ancestor of the specified nodes.
:param int u: The first node.
:param int v: The second node.
:return: The most recent common ancestor of u and v.
:rtype: int
"""
return self._ll_sparse_tree.get_mrca(u, v)
def get_tmrca(self, u, v):
# Deprecated alias for tmrca
return self.tmrca(u, v)
def tmrca(self, u, v):
"""
Returns the time of the most recent common ancestor of the specified
nodes. This is equivalent to::
>>> tree.time(tree.mrca(u, v))
:param int u: The first node.
:param int v: The second node.
:return: The time of the most recent common ancestor of u and v.
:rtype: float
"""
return self.get_time(self.get_mrca(u, v))
def get_parent(self, u):
# Deprecated alias for parent
return self.parent(u)
def parent(self, u):
"""
Returns the parent of the specified node. Returns
the :const:`.NULL_NODE` if u is the root or is not a node in
the current tree.
:param int u: The node of interest.
:return: The parent of u.
:rtype: int
"""
return self._ll_sparse_tree.get_parent(u)
# Quintuply linked tree structure.
def left_child(self, u):
return self._ll_sparse_tree.get_left_child(u)
def right_child(self, u):
return self._ll_sparse_tree.get_right_child(u)
def left_sib(self, u):
return self._ll_sparse_tree.get_left_sib(u)
def right_sib(self, u):
return self._ll_sparse_tree.get_right_sib(u)
# TODO do we also have right_root?
@property
def left_root(self):
return self._ll_sparse_tree.get_left_root()
def get_children(self, u):
# Deprecated alias for self.children
return self.children(u)
def children(self, u):
"""
Returns the children of the specified node ``u`` as a tuple of integer node IDs.
If ``u`` is a leaf, return the empty tuple.
:param int u: The node of interest.
:return: The children of ``u`` as a tuple of integers
:rtype: tuple(int)
"""
return self._ll_sparse_tree.get_children(u)
def get_time(self, u):
# Deprecated alias for self.time
return self.time(u)
def time(self, u):
"""
Returns the time of the specified node in generations.
Equivalent to ``tree.tree_sequence.node(u).time``.
:param int u: The node of interest.
:return: The time of u.
:rtype: float
"""
return self._ll_sparse_tree.get_time(u)
def get_population(self, u):
# Deprecated alias for self.population
return self.population(u)
def population(self, u):
"""
Returns the population associated with the specified node.
Equivalent to ``tree.tree_sequence.node(u).population``.
:param int u: The node of interest.
:return: The ID of the population associated with node u.
:rtype: int
"""
return self._ll_sparse_tree.get_population(u)
def is_internal(self, u):
"""
Returns True if the specified node is not a leaf. A node is internal
if it has one or more children in the current tree.
:param int u: The node of interest.
:return: True if u is not a leaf node.
:rtype: bool
"""
return not self.is_leaf(u)
def is_leaf(self, u):
"""
Returns True if the specified node is a leaf. A node :math:`u` is a
leaf if it has zero children.
:param int u: The node of interest.
:return: True if u is a leaf node.
:rtype: bool
"""
return len(self.children(u)) == 0
def is_sample(self, u):
"""
Returns True if the specified node is a sample. A node :math:`u` is a
sample if it has been marked as a sample in the parent tree sequence.
:param int u: The node of interest.
:return: True if u is a sample.
:rtype: bool
"""
return bool(self._ll_sparse_tree.is_sample(u))
@property
def num_nodes(self):
"""
Returns the number of nodes in the sparse tree.
:rtype: int
"""
return self._ll_sparse_tree.get_num_nodes()
@property
def num_roots(self):
"""
The number of roots in this tree, as defined in the :attr:`.roots` attribute.
Requires O(number of roots) time.
:rtype: int
"""
return self._ll_sparse_tree.get_num_roots()
@property
def roots(self):
"""
The list of roots in this tree. A root is defined as a unique endpoint of
the paths starting at samples. We can define the set of roots as follows:
.. code-block:: python
roots = set()
for u in tree_sequence.samples():
while tree.parent(u) != msprime.NULL_NODE:
u = tree.parent(u)
roots.add(u)
# roots is now the set of all roots in this tree.
assert sorted(roots) == sorted(tree.roots)
The roots of the tree are returned in a list, in no particular order.
Requires O(number of roots) time.
:return: The list of roots in this tree.
:rtype: list
"""
roots = []
u = self.left_root
while u != NULL_NODE:
roots.append(u)
u = self.right_sib(u)
return roots
def get_root(self):
# Deprecated alias for self.root
return self.root
@property
def root(self):
"""
The root of this tree. If the tree contains multiple roots, a ValueError is
raised indicating that the :attr:`.roots` attribute should be used instead.
:return: The root node.
:rtype: int
:raises: :class:`ValueError` if this tree contains more than one root.
"""
root = self.left_root
if root != NULL_NODE and self.right_sib(root) != NULL_NODE:
raise ValueError("More than one root exists. Use tree.roots instead")
return root
def get_index(self):
# Deprecated alias for self.index
return self.index
@property
def index(self):
"""
Returns the index this tree occupies in the parent tree sequence.
This index is zero based, so the first tree in the sequence has index 0.
:return: The index of this tree.
:rtype: int
"""
return self._ll_sparse_tree.get_index()
def get_interval(self):
# Deprecated alias for self.interval
return self.interval
@property
def interval(self):
"""
Returns the coordinates of the genomic interval that this tree
represents the history of. The interval is returned as a tuple
:math:`(l, r)` and is a half-open interval such that the left
coordinate is inclusive and the right coordinate is exclusive. This
tree therefore applies to all genomic locations :math:`x` such that
:math:`l \leq x < r`.
:return: A tuple (l, r) representing the left-most (inclusive)
and right-most (exclusive) coordinates of the genomic region
covered by this tree.
:rtype: tuple
"""
return self._ll_sparse_tree.get_left(), self._ll_sparse_tree.get_right()
def get_length(self):
# Deprecated alias for self.length
return self.length
@property
def length(self):
"""
Returns the length of the genomic interval that this tree represents.
This is defined as :math:`r - l`, where :math:`(l, r)` is the genomic
interval returned by :attr:`.interval`.
:return: The length of the genomic interval covered by this tree.
:rtype: int
"""
left, right = self.get_interval()
return right - left
# The sample_size (or num_samples) is really a property of the tree sequence,
# and so we should provide access to this via a tree.tree_sequence.num_samples
# property access. However, we can't just remove the method as a lot of code
# may depend on it. To complicate things a bit more, sample_size has been
# changed to num_samples elsewhere for consistency. We can't do this here
# because there is already a num_samples method which returns the number of
# samples below a particular node. The best thing to do is probably to
# undocument the sample_size property, but keep it around for ever.
def get_sample_size(self):
# Deprecated alias for self.sample_size
return self.sample_size
@property
def sample_size(self):
"""
Returns the sample size for this tree. This is the number of sample
nodes in the tree.
:return: The number of sample nodes in the tree.
:rtype: int
"""
return self._ll_sparse_tree.get_sample_size()
def draw(
self, path=None, width=None, height=None,
node_labels=None, node_colours=None,
mutation_labels=None, mutation_colours=None,
format=None):
"""
Returns a drawing of this tree.
When working in a Jupyter notebook, use the ``IPython.display.SVG``
function to display the SVG output from this function inline in the notebook::
>>> SVG(tree.draw())
The unicode format uses unicode `box drawing characters
<https://en.wikipedia.org/wiki/Box-drawing_character>`_ to render the tree.
This allows rendered trees to be printed out to the terminal::
>>> print(tree.draw(format="unicode"))
6
┏━┻━┓
┃ 5
┃ ┏━┻┓
┃ ┃ 4
┃ ┃ ┏┻┓
3 0 1 2
The ``node_labels`` argument allows the user to specify custom labels
for nodes, or no labels at all::
>>> print(tree.draw(format="unicode", node_labels={}))
┃
┏━┻━┓
┃ ┃
┃ ┏━┻┓
┃ ┃ ┃
┃ ┃ ┏┻┓
┃ ┃ ┃ ┃
:param str path: The path to the file to write the output. If None, do not
write to file.
:param int width: The width of the image in pixels. If not specified, either
defaults to the minimum size required to depict the tree (text formats)
or 200 pixels.
:param int height: The height of the image in pixels. If not specified, either
defaults to the minimum size required to depict the tree (text formats)
or 200 pixels.
:param map node_labels: If specified, show custom labels for the nodes
that are present in the map. Any nodes not specified in the map will
not have a node label.
:param map node_colours: If specified, show custom colours for nodes. (Only
supported in the SVG format.)
:param str format: The format of the returned image. Currently supported
are 'svg', 'ascii' and 'unicode'.
:return: A representation of this tree in the requested format.
:rtype: str
"""
output = drawing.draw_tree(
self, format=format, width=width, height=height,
node_labels=node_labels, node_colours=node_colours,
mutation_labels=mutation_labels, mutation_colours=mutation_colours)
if path is not None:
with open(path, "w") as f:
f.write(output)
return output
def get_num_mutations(self):
return self.num_mutations
@property
def num_mutations(self):
"""
Returns the total number of mutations across all sites on this tree.
:return: The total number of mutations over all sites on this tree.
:rtype: int
"""
return sum(len(site.mutations) for site in self.sites())
@property
def num_sites(self):
"""
Returns the number of sites on this tree.
:return: The number of sites on this tree.
:rtype: int
"""
return self._ll_sparse_tree.get_num_sites()
def sites(self):
"""
Returns an iterator over all the :ref:`sites <sec_site_table_definition>`
in this tree. Sites are returned in order of increasing ID
(and also position). See the :class:`Site` class for details on
the available fields for each site.
:return: An iterator over all sites in this tree.
:rtype: iter(:class:`.Site`)
"""
# TODO change the low-level API to just return the IDs of the sites.
for ll_site in self._ll_sparse_tree.get_sites():
_, _, _, id_, _ = ll_site
yield self.tree_sequence.site(id_)
def mutations(self):
"""
Returns an iterator over all the
:ref:`mutations <sec_mutation_table_definition>` in this tree.
Mutations are returned in order of nondecreasing site ID.
See the :class:`Mutation` class for details on the available fields for
each mutation.
The returned iterator is equivalent to iterating over all sites
and all mutations in each site, i.e.::
>>> for site in tree.sites():
>>> for mutation in site.mutations:
>>> yield mutation
:return: An iterator over all mutations in this tree.
:rtype: iter(:class:`.Mutation`)
"""
for site in self.sites():
for mutation in site.mutations:
yield add_deprecated_mutation_attrs(site, mutation)
def get_leaves(self, u):
# Deprecated alias for samples. See the discussion in the get_num_leaves
# method for why this method is here and why it is semantically incorrect.
# The 'leaves' iterator below correctly returns the leaves below a given
# node.
return self.samples(u)
def leaves(self, u=None):
"""
Returns an iterator over all the leaves in this tree that are
underneath the specified node. If u is not specified, return all leaves
in the tree.
:param int u: The node of interest.
:return: An iterator over all leaves in the subtree rooted at u.
:rtype: iterator
"""
roots = [u]
if u is None:
roots = self.roots
for root in roots:
for v in self.nodes(root):
if self.is_leaf(v):
yield v
def _sample_generator(self, u):
for v in self.nodes(u):
if self.is_sample(v):
yield v
def samples(self, u=None):
"""
Returns an iterator over all the samples in this tree that are
underneath the specified node. If u is a sample, it is included in the
returned iterator. If u is not specified, return all samples in the tree.
If the :meth:`.TreeSequence.trees` method is called with
``sample_lists=True``, this method uses an efficient algorithm to find
the samples. If not, a simple traversal based method is used.
:param int u: The node of interest.
:return: An iterator over all samples in the subtree rooted at u.
:rtype: iterator
"""
roots = [u]
if u is None:
roots = self.roots
for root in roots:
if self._ll_sparse_tree.get_flags() & _msprime.SAMPLE_LISTS:
for v in _msprime.SampleListIterator(self._ll_sparse_tree, root):
yield v
else:
for v in self._sample_generator(root):
yield v
def get_num_leaves(self, u):
# Deprecated alias for num_samples. The method name is inaccurate
        # as this will count the number of _samples_. This is only provided to
# avoid breaking existing code and should not be used in new code. We could
# change this method to be semantically correct and just count the
# number of leaves we hit in the leaves() iterator. However, this would
# have the undesirable effect of making code that depends on the constant
# time performance of get_num_leaves many times slower. So, the best option
# is to leave this method as is, and to slowly deprecate it out. Once this
# has been removed, we might add in a ``num_leaves`` method that returns the
# length of the leaves() iterator as one would expect.
return self.num_samples(u)
def get_num_samples(self, u=None):
# Deprecated alias for num_samples.
return self.num_samples(u)
def num_samples(self, u=None):
"""
Returns the number of samples in this tree underneath the specified
        node (counting the node itself if it is a sample). If u is not
        specified, return the total number of samples in the tree.
If the :meth:`.TreeSequence.trees` method is called with
``sample_counts=True`` this method is a constant time operation. If not,
a slower traversal based algorithm is used to count the samples.
:param int u: The node of interest.
:return: The number of samples in the subtree rooted at u.
:rtype: int
"""
roots = [u]
if u is None:
roots = self.roots
return sum(self._ll_sparse_tree.get_num_samples(u) for u in roots)
def get_num_tracked_leaves(self, u):
# Deprecated alias for num_tracked_samples. The method name is inaccurate
# as this will count the number of tracked _samples_. This is only provided to
# avoid breaking existing code and should not be used in new code.
return self.num_tracked_samples(u)
def get_num_tracked_samples(self, u=None):
# Deprecated alias for num_tracked_samples
return self.num_tracked_samples(u)
def num_tracked_samples(self, u=None):
"""
Returns the number of samples in the set specified in the
``tracked_samples`` parameter of the :meth:`.TreeSequence.trees` method
underneath the specified node. If the input node is not specified,
return the total number of tracked samples in the tree.
This is a constant time operation.
:param int u: The node of interest.
:return: The number of samples within the set of tracked samples in
the subtree rooted at u.
:rtype: int
:raises RuntimeError: if the :meth:`.TreeSequence.trees`
method is not called with ``sample_counts=True``.
"""
roots = [u]
if u is None:
roots = self.roots
if not (self._ll_sparse_tree.get_flags() & _msprime.SAMPLE_COUNTS):
raise RuntimeError(
"The get_num_tracked_samples method is only supported "
"when sample_counts=True.")
return sum(self._ll_sparse_tree.get_num_tracked_samples(root) for root in roots)
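    # Example (sketch): tracking a subset of samples across trees; this assumes a
    # tree sequence ``ts`` is available (e.g. returned by msprime.simulate()).
    #
    # >>> for tree in ts.trees(tracked_samples=[0, 1], sample_counts=True):
    # ...     for root in tree.roots:
    # ...         print(tree.interval, tree.num_tracked_samples(root))
    #
    # Note that tracked_samples requires sample_counts=True (the default);
    # TreeSequence.trees() raises a ValueError otherwise.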
def _preorder_traversal(self, u):
stack = [u]
while len(stack) > 0:
v = stack.pop()
if self.is_internal(v):
stack.extend(reversed(self.get_children(v)))
yield v
def _postorder_traversal(self, u):
stack = [u]
k = NULL_NODE
while stack:
v = stack[-1]
if self.is_internal(v) and v != k:
stack.extend(reversed(self.get_children(v)))
else:
k = self.get_parent(v)
yield stack.pop()
def _inorder_traversal(self, u):
# TODO add a nonrecursive version of the inorder traversal.
children = self.get_children(u)
mid = len(children) // 2
for c in children[:mid]:
for v in self._inorder_traversal(c):
yield v
yield u
for c in children[mid:]:
for v in self._inorder_traversal(c):
yield v
def _levelorder_traversal(self, u):
queue = collections.deque([u])
while queue:
v = queue.popleft()
if self.is_internal(v):
queue.extend(self.get_children(v))
yield v
def nodes(self, root=None, order="preorder"):
"""
Returns an iterator over the nodes in this tree. If the root parameter
is provided, iterate over the nodes in the subtree rooted at this
node. If this is None, iterate over all nodes. If the order parameter
        is provided, iterate over the nodes in the specified traversal order.
:param int root: The root of the subtree we are traversing.
:param str order: The traversal ordering. Currently 'preorder',
'inorder', 'postorder' and 'levelorder' ('breadthfirst')
are supported.
:return: An iterator over the nodes in the tree in some traversal order.
:rtype: iterator
"""
methods = {
"preorder": self._preorder_traversal,
"inorder": self._inorder_traversal,
"postorder": self._postorder_traversal,
"levelorder": self._levelorder_traversal,
"breadthfirst": self._levelorder_traversal
}
try:
iterator = methods[order]
except KeyError:
raise ValueError("Traversal ordering '{}' not supported".format(order))
roots = [root]
if root is None:
roots = self.roots
for u in roots:
for v in iterator(u):
yield v
def newick(self, precision=14, time_scale=1):
"""
Returns a `newick encoding <https://en.wikipedia.org/wiki/Newick_format>`_
of this tree. Leaf nodes are labelled with their numerical ID + 1,
and internal nodes are not labelled.
This method is currently primarily for ms-compatibility and
is not intended as a consistent means of data interchange.
:param int precision: The numerical precision with which branch lengths are
printed.
:param float time_scale: A value which all branch lengths are multiplied by.
:return: A newick representation of this tree.
:rtype: str
"""
s = self._ll_sparse_tree.get_newick(precision=precision, time_scale=time_scale)
if not IS_PY2:
s = s.decode()
return s
@property
def parent_dict(self):
return self.get_parent_dict()
def get_parent_dict(self):
pi = {
u: self.parent(u) for u in range(self.num_nodes)
if self.parent(u) != NULL_NODE}
return pi
def __str__(self):
return str(self.get_parent_dict())
def load(path):
"""
Loads a tree sequence from the specified file path. This file must be in the
:ref:`HDF5 file format <sec_hdf5_file_format>` produced by the
:meth:`.TreeSequence.dump` method.
:param str path: The file path of the HDF5 file containing the
tree sequence we wish to load.
:return: The tree sequence object containing the information
stored in the specified file path.
:rtype: :class:`msprime.TreeSequence`
"""
return TreeSequence.load(path)
def load_tables(
nodes, edges, migrations=None, sites=None, mutations=None,
provenances=None, sequence_length=0):
"""
Loads the tree sequence data from the specified table objects, and
returns the resulting :class:`.TreeSequence` object. These tables
must fulfil the properties required for an input tree sequence as
described in the :ref:`sec_valid_tree_sequence_requirements` section.
The ``sequence_length`` parameter determines the
:attr:`.TreeSequence.sequence_length` of the returned tree sequence. If it
is 0 or not specified, the value is taken to be the maximum right
coordinate of the input edges. This parameter is useful in degenerate
situations (such as when there are zero edges), but can usually be ignored.
:param NodeTable nodes: The :ref:`node table <sec_node_table_definition>`
(required).
:param EdgeTable edges: The :ref:`edge table <sec_edge_table_definition>`
(required).
:param MigrationTable migrations: The :ref:`migration table
<sec_migration_table_definition>` (optional).
:param SiteTable sites: The :ref:`site table <sec_site_table_definition>`
(optional; but if supplied, ``mutations`` must also be specified).
:param MutationTable mutations: The :ref:`mutation table
<sec_mutation_table_definition>` (optional; but if supplied, ``sites``
must also be specified).
:param ProvenanceTable provenances: The :ref:`provenance table
<sec_provenance_table_definition>` (optional).
:param float sequence_length: The sequence length of the returned tree sequence. If
not supplied or zero this will be inferred from the set of edges.
:return: A :class:`.TreeSequence` consistent with the specified tables.
:rtype: TreeSequence
"""
# TODO update the low-level module to accept None and remove this
kwargs = {"nodes": nodes, "edges": edges, "sequence_length": sequence_length}
if migrations is not None:
kwargs["migrations"] = migrations
if sites is not None:
kwargs["sites"] = sites
if mutations is not None:
kwargs["mutations"] = mutations
if provenances is not None:
kwargs["provenances"] = provenances
return TreeSequence.load_tables(**kwargs)
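# Example (sketch): building a minimal tree sequence directly from tables. The
# node and edge values below are illustrative only.
#
# >>> nodes = tables.NodeTable()
# >>> nodes.add_row(flags=NODE_IS_SAMPLE, time=0)   # node 0
# >>> nodes.add_row(flags=NODE_IS_SAMPLE, time=0)   # node 1
# >>> nodes.add_row(flags=0, time=1)                # node 2, the root
# >>> edges = tables.EdgeTable()
# >>> edges.add_row(left=0, right=1, parent=2, child=0)
# >>> edges.add_row(left=0, right=1, parent=2, child=1)
# >>> ts = load_tables(nodes=nodes, edges=edges)
# >>> ts.num_trees
# 1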
def parse_nodes(source, strict=True, encoding='utf8', base64_metadata=True):
"""
Parse the specified file-like object containing a whitespace delimited
description of a node table and returns the corresponding :class:`NodeTable`
instance. See the :ref:`node text format <sec_node_text_format>` section
for the details of the required format and the
:ref:`node table definition <sec_node_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict``
parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
sep = None
if strict:
sep = "\t"
# Read the header and find the indexes of the required fields.
table = tables.NodeTable()
header = source.readline().strip("\n").split(sep)
is_sample_index = header.index("is_sample")
time_index = header.index("time")
population_index = None
metadata_index = None
try:
population_index = header.index("population")
except ValueError:
pass
try:
metadata_index = header.index("metadata")
except ValueError:
pass
for line in source:
tokens = line.split(sep)
if len(tokens) >= 2:
is_sample = int(tokens[is_sample_index])
time = float(tokens[time_index])
flags = 0
if is_sample != 0:
flags |= NODE_IS_SAMPLE
population = NULL_POPULATION
if population_index is not None:
population = int(tokens[population_index])
metadata = b''
if metadata_index is not None and metadata_index < len(tokens):
metadata = tokens[metadata_index].encode(encoding)
if base64_metadata:
metadata = base64.b64decode(metadata)
table.add_row(
flags=flags, time=time, population=population, metadata=metadata)
return table
def parse_edges(source, strict=True):
"""
Parse the specified file-like object containing a whitespace delimited
    description of an edge table and returns the corresponding :class:`EdgeTable`
instance. See the :ref:`edge text format <sec_edge_text_format>` section
for the details of the required format and the
:ref:`edge table definition <sec_edge_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict`` parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
"""
sep = None
if strict:
sep = "\t"
table = tables.EdgeTable()
header = source.readline().strip("\n").split(sep)
left_index = header.index("left")
right_index = header.index("right")
parent_index = header.index("parent")
children_index = header.index("child")
for line in source:
tokens = line.split(sep)
if len(tokens) >= 4:
left = float(tokens[left_index])
right = float(tokens[right_index])
parent = int(tokens[parent_index])
children = tuple(map(int, tokens[children_index].split(",")))
for child in children:
table.add_row(left=left, right=right, parent=parent, child=child)
return table
def parse_sites(source, strict=True, encoding='utf8', base64_metadata=True):
"""
Parse the specified file-like object containing a whitespace delimited
description of a site table and returns the corresponding :class:`SiteTable`
instance. See the :ref:`site text format <sec_site_text_format>` section
for the details of the required format and the
:ref:`site table definition <sec_site_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict``
parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
sep = None
if strict:
sep = "\t"
header = source.readline().strip("\n").split(sep)
position_index = header.index("position")
ancestral_state_index = header.index("ancestral_state")
metadata_index = None
try:
metadata_index = header.index("metadata")
except ValueError:
pass
table = tables.SiteTable()
for line in source:
tokens = line.split(sep)
if len(tokens) >= 2:
position = float(tokens[position_index])
ancestral_state = tokens[ancestral_state_index]
metadata = b''
if metadata_index is not None and metadata_index < len(tokens):
metadata = tokens[metadata_index].encode(encoding)
if base64_metadata:
metadata = base64.b64decode(metadata)
table.add_row(
position=position, ancestral_state=ancestral_state, metadata=metadata)
return table
def parse_mutations(source, strict=True, encoding='utf8', base64_metadata=True):
"""
Parse the specified file-like object containing a whitespace delimited
description of a mutation table and returns the corresponding :class:`MutationTable`
instance. See the :ref:`mutation text format <sec_mutation_text_format>` section
for the details of the required format and the
:ref:`mutation table definition <sec_mutation_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict``
parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
sep = None
if strict:
sep = "\t"
header = source.readline().strip("\n").split(sep)
site_index = header.index("site")
node_index = header.index("node")
derived_state_index = header.index("derived_state")
parent_index = None
parent = NULL_MUTATION
try:
parent_index = header.index("parent")
except ValueError:
pass
metadata_index = None
try:
metadata_index = header.index("metadata")
except ValueError:
pass
table = tables.MutationTable()
for line in source:
tokens = line.split(sep)
if len(tokens) >= 3:
site = int(tokens[site_index])
node = int(tokens[node_index])
derived_state = tokens[derived_state_index]
if parent_index is not None:
parent = int(tokens[parent_index])
metadata = b''
if metadata_index is not None and metadata_index < len(tokens):
metadata = tokens[metadata_index].encode(encoding)
if base64_metadata:
metadata = base64.b64decode(metadata)
table.add_row(
site=site, node=node, derived_state=derived_state, parent=parent,
metadata=metadata)
return table
def load_text(nodes, edges, sites=None, mutations=None, sequence_length=0, strict=True,
encoding='utf8', base64_metadata=True):
"""
Parses the tree sequence data from the specified file-like objects, and
returns the resulting :class:`.TreeSequence` object. The format
for these files is documented in the :ref:`sec_text_file_format` section,
and is produced by the :meth:`.TreeSequence.dump_text` method. Further
properties required for an input tree sequence are described in the
:ref:`sec_valid_tree_sequence_requirements` section. This method is intended as a
convenient interface for importing external data into msprime; the HDF5
    based file format used by :meth:`msprime.load` is many times more
efficient than this text format.
The ``nodes`` and ``edges`` parameters are mandatory and must be file-like
objects containing text with whitespace delimited columns, parsable by
:func:`parse_nodes` and :func:`parse_edges`, respectively. ``sites`` and
``mutations`` are optional, and must be parsable by :func:`parse_sites` and
:func:`parse_mutations`, respectively.
The ``sequence_length`` parameter determines the
:attr:`.TreeSequence.sequence_length` of the returned tree sequence. If it
is 0 or not specified, the value is taken to be the maximum right
coordinate of the input edges. This parameter is useful in degenerate
situations (such as when there are zero edges), but can usually be ignored.
The ``strict`` parameter controls the field delimiting algorithm that
is used. If ``strict`` is True (the default), we require exactly one
tab character separating each field. If ``strict`` is False, a more relaxed
whitespace delimiting algorithm is used, such that any run of whitespace
is regarded as a field separator. In most situations, ``strict=False``
    is more convenient, but it can lead to errors in certain situations. For
example, if a deletion is encoded in the mutation table this will not
be parseable when ``strict=False``.
After parsing the tables, :func:`sort_tables` is called to ensure that
the loaded tables satisfy the tree sequence :ref:`ordering requirements
<sec_valid_tree_sequence_requirements>`. Note that this may result in the
IDs of various entities changing from their positions in the input file.
:param stream nodes: The file-like object containing text describing a
:class:`.NodeTable`.
:param stream edges: The file-like object containing text
describing an :class:`.EdgeTable`.
:param stream sites: The file-like object containing text describing a
:class:`.SiteTable`.
:param stream mutations: The file-like object containing text
describing a :class:`MutationTable`.
:param float sequence_length: The sequence length of the returned tree sequence. If
not supplied or zero this will be inferred from the set of edges.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
:return: The tree sequence object containing the information
stored in the specified file paths.
:rtype: :class:`msprime.TreeSequence`
"""
node_table = parse_nodes(nodes, strict=strict, encoding=encoding,
base64_metadata=base64_metadata)
edge_table = parse_edges(edges, strict=strict)
site_table = tables.SiteTable()
mutation_table = tables.MutationTable()
if sites is not None:
site_table = parse_sites(sites, strict=strict, encoding=encoding,
base64_metadata=base64_metadata)
if mutations is not None:
mutation_table = parse_mutations(mutations, strict=strict, encoding=encoding,
base64_metadata=base64_metadata)
tables.sort_tables(
nodes=node_table, edges=edge_table, sites=site_table, mutations=mutation_table)
return load_tables(
nodes=node_table, edges=edge_table, sites=site_table, mutations=mutation_table,
sequence_length=sequence_length)
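# Example (sketch): loading a minimal tree sequence from text. With strict=False,
# any run of whitespace separates columns.
#
# >>> import io
# >>> nodes_txt = io.StringIO("is_sample  time\n"
# ...                         "1          0\n"
# ...                         "1          0\n"
# ...                         "0          1\n")
# >>> edges_txt = io.StringIO("left  right  parent  child\n"
# ...                         "0     1      2       0\n"
# ...                         "0     1      2       1\n")
# >>> ts = load_text(nodes=nodes_txt, edges=edges_txt, strict=False)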
class TreeSequence(object):
"""
A single tree sequence, as defined by the :ref:`data model <sec_data_model>`.
A TreeSequence instance can be created from a set of
:ref:`tables <sec_table_definitions>` using :func:`.load_tables`; or loaded
from a set of text files using :func:`.load_text`; or, loaded from a
native file using :func:`load`.
TreeSequences are immutable. To change the data held in a particular
    tree sequence, first output the information to a set of tables
(using :meth:`.dump_tables`), edit those tables using the
:ref:`tables api <sec_tables_api>`, and create a new tree sequence using
:func:`.load_tables`.
The :meth:`.trees` method iterates over all trees in a tree sequence, and
the :meth:`.variants` method iterates over all sites and their genotypes.
"""
def __init__(self, ll_tree_sequence):
self._ll_tree_sequence = ll_tree_sequence
@property
def ll_tree_sequence(self):
return self.get_ll_tree_sequence()
def get_ll_tree_sequence(self):
return self._ll_tree_sequence
@classmethod
def load(cls, path):
ts = _msprime.TreeSequence()
ts.load(path)
return TreeSequence(ts)
@classmethod
def load_tables(cls, **kwargs):
ts = _msprime.TreeSequence()
ts.load_tables(**kwargs)
return TreeSequence(ts)
def dump(self, path, zlib_compression=False):
"""
Writes the tree sequence to the specified file path.
:param str path: The file path to write the TreeSequence to.
:param bool zlib_compression: If True, use HDF5's native
compression when storing the data leading to smaller
file size. When loading, data will be decompressed
transparently, but load times will be significantly slower.
"""
self._ll_tree_sequence.dump(path, zlib_compression)
@property
def tables(self):
"""
A copy of the tables underlying this tree sequence. See also
:meth:`.dump_tables`.
        :return: A :class:`.TableCollection` containing a copy of the
tables underlying this tree sequence.
:rtype: TableCollection
"""
return self.dump_tables()
def dump_tables(
self, nodes=None, edges=None, migrations=None, sites=None,
mutations=None, provenances=None):
"""
Copy the contents of the tables underlying the tree sequence to the
specified objects.
:param NodeTable nodes: The NodeTable to load the nodes into.
:param EdgeTable edges: The EdgeTable to load the edges into.
:param MigrationTable migrations: The MigrationTable to load the migrations into.
:param SiteTable sites: The SiteTable to load the sites into.
:param MutationTable mutations: The MutationTable to load the mutations into.
:param ProvenanceTable provenances: The ProvenanceTable to load the provenances
into.
:return: A :class:`.TableCollection` containing all tables underlying
the tree sequence.
:rtype: TableCollection
"""
        # TODO document this and test the semantics of passing in new tables
# as well as returning the updated tables.
if nodes is None:
nodes = tables.NodeTable()
if edges is None:
edges = tables.EdgeTable()
if migrations is None:
migrations = tables.MigrationTable()
if sites is None:
sites = tables.SiteTable()
if mutations is None:
mutations = tables.MutationTable()
if provenances is None:
provenances = tables.ProvenanceTable()
self._ll_tree_sequence.dump_tables(
nodes=nodes, edges=edges, migrations=migrations, sites=sites,
mutations=mutations, provenances=provenances)
return tables.TableCollection(
nodes=nodes, edges=edges, migrations=migrations, sites=sites,
mutations=mutations, provenances=provenances)
def dump_text(
self, nodes=None, edges=None, sites=None, mutations=None, provenances=None,
precision=6, encoding='utf8', base64_metadata=True):
"""
Writes a text representation of the tables underlying the tree sequence
        to the specified file-like objects.
If Base64 encoding is not used, then metadata will be saved directly, possibly
resulting in errors reading the tables back in if metadata includes whitespace.
:param stream nodes: The file-like object (having a .write() method) to write
the NodeTable to.
:param stream edges: The file-like object to write the EdgeTable to.
:param stream sites: The file-like object to write the SiteTable to.
:param stream mutations: The file-like object to write the MutationTable to.
:param stream provenances: The file-like object to write the ProvenanceTable to.
:param int precision: The number of digits of precision.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
if nodes is not None:
print(
"id", "is_sample", "time", "population", "metadata", sep="\t",
file=nodes)
for node in self.nodes():
metadata = node.metadata
if base64_metadata:
metadata = base64.b64encode(metadata).decode(encoding)
row = (
"{id:d}\t"
"{is_sample:d}\t"
"{time:.{precision}f}\t"
"{population:d}\t"
"{metadata}").format(
precision=precision, id=node.id,
is_sample=node.is_sample(), time=node.time,
population=node.population,
metadata=metadata)
print(row, file=nodes)
if edges is not None:
print("left", "right", "parent", "child", sep="\t", file=edges)
for edge in self.edges():
row = (
"{left:.{precision}f}\t"
"{right:.{precision}f}\t"
"{parent:d}\t"
"{child:d}").format(
precision=precision, left=edge.left, right=edge.right,
parent=edge.parent, child=edge.child)
print(row, file=edges)
if sites is not None:
print("position", "ancestral_state", "metadata", sep="\t", file=sites)
for site in self.sites():
metadata = site.metadata
if base64_metadata:
metadata = base64.b64encode(metadata).decode(encoding)
row = (
"{position:.{precision}f}\t"
"{ancestral_state}\t"
"{metadata}").format(
precision=precision, position=site.position,
ancestral_state=site.ancestral_state,
metadata=metadata)
print(row, file=sites)
if mutations is not None:
print(
"site", "node", "derived_state", "parent", "metadata",
sep="\t", file=mutations)
for site in self.sites():
for mutation in site.mutations:
metadata = mutation.metadata
if base64_metadata:
metadata = base64.b64encode(metadata).decode(encoding)
row = (
"{site}\t"
"{node}\t"
"{derived_state}\t"
"{parent}\t"
"{metadata}").format(
site=mutation.site, node=mutation.node,
derived_state=mutation.derived_state,
parent=mutation.parent,
metadata=metadata)
print(row, file=mutations)
if provenances is not None:
print("id", "timestamp", "record", sep="\t", file=provenances)
for provenance in self.provenances():
row = (
"{id}\t"
"{timestamp}\t"
"{record}\t").format(
id=provenance.id,
timestamp=provenance.timestamp,
record=provenance.record)
print(row, file=provenances)
# num_samples was originally called sample_size, and so we must keep sample_size
# around as a deprecated alias.
@property
def num_samples(self):
"""
Returns the number of samples in this tree sequence. This is the number
of sample nodes in each tree.
:return: The number of sample nodes in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_samples()
@property
def sample_size(self):
# Deprecated alias for num_samples
return self.num_samples
def get_sample_size(self):
# Deprecated alias for num_samples
return self.num_samples
@property
def sequence_length(self):
"""
Returns the sequence length in this tree sequence. This defines the
genomic scale over which tree coordinates are defined. Given a
tree sequence with a sequence length :math:`L`, the constituent
trees will be defined over the half-closed interval
        :math:`[0, L)`. Each tree then covers some subset of this
interval --- see :meth:`msprime.SparseTree.get_interval` for details.
:return: The length of the sequence in this tree sequence in bases.
:rtype: float
"""
return self.get_sequence_length()
def get_sequence_length(self):
return self._ll_tree_sequence.get_sequence_length()
@property
def num_edges(self):
"""
Returns the number of :ref:`edges <sec_edge_table_definition>` in this
tree sequence.
:return: The number of edges in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_edges()
def get_num_trees(self):
# Deprecated alias for self.num_trees
return self.num_trees
@property
def num_trees(self):
"""
Returns the number of distinct trees in this tree sequence. This
is equal to the number of trees returned by the :meth:`.trees`
method.
:return: The number of trees in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_trees()
def get_num_sites(self):
# Deprecated alias for self.num_sites
return self._ll_tree_sequence.get_num_sites()
@property
def num_sites(self):
"""
Returns the number of sites in this tree sequence.
:return: The number of sites in this tree sequence.
:rtype: int
"""
return self.get_num_sites()
def get_num_mutations(self):
# Deprecated alias for self.num_mutations
return self.num_mutations
@property
def num_mutations(self):
"""
Returns the number of :ref:`mutations <sec_mutation_table_definition>`
in this tree sequence.
:return: The number of mutations in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_mutations()
def get_num_nodes(self):
# Deprecated alias for self.num_nodes
return self.num_nodes
@property
def num_nodes(self):
"""
Returns the number of :ref:`nodes <sec_node_table_definition>` in
this tree sequence.
:return: The number of nodes in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_nodes()
@property
def num_provenances(self):
"""
Returns the number of :ref:`provenances <sec_provenance_table_definition>`
in this tree sequence.
:return: The number of provenances in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_provenances()
@property
def num_migrations(self):
"""
Returns the number of :ref:`migrations <sec_migration_table_definition>`
in this tree sequence.
:return: The number of migrations in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_migrations()
def migrations(self):
"""
Returns an iterator over all the
:ref:`migrations <sec_migration_table_definition>` in this tree sequence.
Migrations are returned in nondecreasing order of the ``time`` value.
:return: An iterator over all migrations.
:rtype: iter(:class:`.Migration`)
"""
for j in range(self._ll_tree_sequence.get_num_migrations()):
yield Migration(*self._ll_tree_sequence.get_migration(j))
def provenances(self):
for j in range(self.num_provenances):
yield self.provenance(j)
def nodes(self):
"""
Returns an iterator over all the :ref:`nodes <sec_node_table_definition>`
in this tree sequence.
:return: An iterator over all nodes.
:rtype: iter(:class:`.Node`)
"""
for j in range(self.num_nodes):
yield self.node(j)
def edges(self):
"""
Returns an iterator over all the :ref:`edges <sec_edge_table_definition>`
in this tree sequence. Edges are returned in the order required
for a :ref:`valid tree sequence <sec_valid_tree_sequence_requirements>`. So,
edges are guaranteed to be ordered such that (a) all parents with a
        given ID are contiguous; (b) edges are returned in nondecreasing
order of parent time; (c) within the edges for a given parent, edges
are sorted first by child ID and then by left coordinate.
:return: An iterator over all edges.
:rtype: iter(:class:`.Edge`)
"""
for j in range(self.num_edges):
left, right, parent, child = self._ll_tree_sequence.get_edge(j)
yield Edge(left=left, right=right, parent=parent, child=child)
def edgesets(self):
# TODO the order that these records are returned in is not well specified.
# Hopefully this does not matter, and we can just state that the ordering
# should not be depended on.
children = collections.defaultdict(set)
active_edgesets = {}
for (left, right), edges_out, edges_in in self.edge_diffs():
# Complete and return any edgesets that are affected by this tree
# transition
parents = iter(edge.parent for edge in itertools.chain(edges_out, edges_in))
for parent in parents:
if parent in active_edgesets:
edgeset = active_edgesets.pop(parent)
edgeset.right = left
edgeset.children = sorted(children[parent])
yield edgeset
for edge in edges_out:
children[edge.parent].remove(edge.child)
for edge in edges_in:
children[edge.parent].add(edge.child)
# Update the active edgesets
for edge in itertools.chain(edges_out, edges_in):
if len(children[edge.parent]) > 0 and edge.parent not in active_edgesets:
active_edgesets[edge.parent] = Edgeset(left, right, edge.parent, [])
for parent in active_edgesets.keys():
edgeset = active_edgesets[parent]
edgeset.right = self.sequence_length
edgeset.children = sorted(children[edgeset.parent])
yield edgeset
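    # Example (sketch): each Edgeset groups the edges that share a parent over a
    # genomic interval, so the children of that parent are reported together.
    #
    # >>> for es in ts.edgesets():
    # ...     print(es.left, es.right, es.parent, es.children)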
def edge_diffs(self):
iterator = _msprime.TreeDiffIterator(self._ll_tree_sequence)
for interval, edge_tuples_out, edge_tuples_in in iterator:
edges_out = [Edge(*e) for e in edge_tuples_out]
edges_in = [Edge(*e) for e in edge_tuples_in]
yield interval, edges_out, edges_in
def sites(self):
"""
Returns an iterator over all the :ref:`sites <sec_site_table_definition>`
in this tree sequence. Sites are returned in order of increasing ID
(and also position). See the :class:`Site` class for details on
the available fields for each site.
:return: An iterator over all sites.
:rtype: iter(:class:`.Site`)
"""
for j in range(self.num_sites):
yield self.site(j)
def mutations(self):
"""
Returns an iterator over all the
:ref:`mutations <sec_mutation_table_definition>` in this tree sequence.
Mutations are returned in order of nondecreasing site ID.
See the :class:`Mutation` class for details on the available fields for
each mutation.
The returned iterator is equivalent to iterating over all sites
and all mutations in each site, i.e.::
>>> for site in tree_sequence.sites():
>>> for mutation in site.mutations:
>>> yield mutation
:return: An iterator over all mutations in this tree sequence.
:rtype: iter(:class:`.Mutation`)
"""
for site in self.sites():
for mutation in site.mutations:
yield add_deprecated_mutation_attrs(site, mutation)
def breakpoints(self):
"""
Returns an iterator over the breakpoints along the chromosome,
including the two extreme points 0 and L. This is equivalent to
>>> [0] + [t.get_interval()[1] for t in self.trees()]
although we do not build an explicit list.
:return: An iterator over all the breakpoints along the simulated
sequence.
:rtype: iter
"""
yield 0
for t in self.trees():
yield t.get_interval()[1]
def first(self):
"""
Returns the first tree in this :class:`.TreeSequence`. To iterate over all
trees in the sequence, use the :meth:`.trees` method.
Currently does not support the extra options for the :meth:`.trees` method.
:return: The first tree in this tree sequence.
:rtype: :class:`.SparseTree`.
"""
return next(self.trees())
def trees(
self, tracked_samples=None, sample_counts=True, sample_lists=False,
tracked_leaves=None, leaf_counts=None, leaf_lists=None):
"""
Returns an iterator over the trees in this tree sequence. Each value
returned in this iterator is an instance of :class:`.SparseTree`.
The ``sample_counts`` and ``sample_lists`` parameters control the
features that are enabled for the resulting trees. If ``sample_counts``
is True, then it is possible to count the number of samples underneath
a particular node in constant time using the :meth:`.get_num_samples`
method. If ``sample_lists`` is True a more efficient algorithm is
used in the :meth:`.SparseTree.samples` method.
The ``tracked_samples`` parameter can be used to efficiently count the
number of samples in a given set that exist in a particular subtree
using the :meth:`.SparseTree.get_num_tracked_samples` method. It is an
error to use the ``tracked_samples`` parameter when the ``sample_counts``
flag is False.
:warning: Do not store the results of this iterator in a list!
For performance reasons, the same underlying object is used
for every tree returned which will most likely lead to unexpected
behaviour.
:param list tracked_samples: The list of samples to be tracked and
counted using the :meth:`.SparseTree.get_num_tracked_samples`
method.
:param bool sample_counts: If True, support constant time sample counts
via the :meth:`.SparseTree.get_num_samples` and
:meth:`.SparseTree.get_num_tracked_samples` methods.
:param bool sample_lists: If True, provide more efficient access
            to the samples beneath a given node using the
:meth:`.SparseTree.samples` method.
:return: An iterator over the sparse trees in this tree sequence.
:rtype: iter
"""
# tracked_leaves, leaf_counts and leaf_lists are deprecated aliases
# for tracked_samples, sample_counts and sample_lists respectively.
# These are left over from an older version of the API when leaves
# and samples were synonymous.
if tracked_leaves is not None:
tracked_samples = tracked_leaves
if leaf_counts is not None:
sample_counts = leaf_counts
if leaf_lists is not None:
sample_lists = leaf_lists
flags = 0
if sample_counts:
flags |= _msprime.SAMPLE_COUNTS
elif tracked_samples is not None:
raise ValueError("Cannot set tracked_samples without sample_counts")
if sample_lists:
flags |= _msprime.SAMPLE_LISTS
kwargs = {"flags": flags}
if tracked_samples is not None:
# TODO remove this when we allow numpy arrays in the low-level API.
kwargs["tracked_samples"] = list(tracked_samples)
ll_sparse_tree = _msprime.SparseTree(self._ll_tree_sequence, **kwargs)
iterator = _msprime.SparseTreeIterator(ll_sparse_tree)
sparse_tree = SparseTree(ll_sparse_tree, self)
for _ in iterator:
yield sparse_tree
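    # Example (sketch): because the same SparseTree object is reused on every
    # iteration, extract what you need inside the loop rather than keeping the
    # tree objects themselves.
    #
    # >>> intervals = [tree.interval for tree in ts.trees()]  # fine
    # >>> trees = list(ts.trees())  # wrong: every entry refers to the same object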
def haplotypes(self):
"""
Returns an iterator over the haplotypes resulting from the trees
and mutations in this tree sequence as a string.
The iterator returns a total of :math:`n` strings, each of which
contains :math:`s` characters (:math:`n` is the sample size
returned by :attr:`msprime.TreeSequence.num_samples` and
:math:`s` is the number of sites returned by
:attr:`msprime.TreeSequence.num_sites`). The first
string returned is the haplotype for sample `0`, and so on.
For a given haplotype ``h``, the value of ``h[j]`` is the observed
allelic state at site ``j``.
See also the :meth:`variants` iterator for site-centric access
to sample genotypes.
This method is only supported for single-letter alleles.
:return: An iterator over the haplotype strings for the samples in
this tree sequence.
:rtype: iter
:raises: LibraryError if called on a tree sequence containing
multiletter alleles.
"""
hapgen = _msprime.HaplotypeGenerator(self._ll_tree_sequence)
j = 0
# Would use range here except for Python 2.
while j < self.num_samples:
yield hapgen.get_haplotype(j)
j += 1
def variants(self, as_bytes=False):
"""
Returns an iterator over the variants in this tree sequence. See the
:class:`Variant` class for details on the fields of each returned
object. By default the ``genotypes`` for the variants are numpy arrays,
corresponding to indexes into the ``alleles`` array. If the
``as_bytes`` parameter is true, these allelic values are recorded
directly into a bytes array.
.. note::
The ``as_bytes`` parameter is kept as a compatibility
option for older code. It is not the recommended way of
accessing variant data, and will be deprecated in a later
release. Another method will be provided to obtain the allelic
states for each site directly.
:param bool as_bytes: If True, the genotype values will be returned
as a Python bytes object. This is useful in certain situations
(i.e., directly printing the genotypes) or when numpy is
not available. Otherwise, genotypes are returned as a numpy
array (the default).
        :return: An iterator over all variants in this tree sequence.
:rtype: iter(:class:`Variant`)
"""
# See comments for the Variant type for discussion on why the
# present form was chosen.
check_numpy()
iterator = _msprime.VariantGenerator(self._ll_tree_sequence)
for site_id, genotypes, alleles in iterator:
site = self.site(site_id)
if as_bytes:
if any(len(allele) > 1 for allele in alleles):
raise ValueError(
"as_bytes only supported for single-letter alleles")
bytes_genotypes = np.empty(self.num_samples, dtype=np.uint8)
lookup = np.array([ord(a[0]) for a in alleles], dtype=np.uint8)
bytes_genotypes[:] = lookup[genotypes]
genotypes = bytes_genotypes.tobytes()
yield Variant(site, alleles, genotypes)
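    # Example (sketch): site-by-site access to genotypes via the variants iterator.
    #
    # >>> for var in ts.variants():
    # ...     print(var.site.position, var.alleles, var.genotypes)
    #
    # With as_bytes=True the genotypes are returned as a bytes object of allele
    # characters instead of a numpy array of allele indexes.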
def genotype_matrix(self):
"""
Returns an :math:`m \\times n` numpy array of the genotypes in this
        tree sequence, where :math:`m` is the number of sites and :math:`n`
the number of samples. The genotypes are the indexes into the array
of ``alleles``, as described for the :class:`Variant` class. The value
        0 always corresponds to the ancestral state, and values > 0 represent
distinct derived states.
.. warning::
This method can consume a **very large** amount of memory! If
all genotypes are not needed at once, it is usually better to
access them sequentially using the :meth:`.variants` iterator.
:return: The full matrix of genotypes.
:rtype: numpy.ndarray (dtype=np.uint8)
"""
return self._ll_tree_sequence.get_genotype_matrix()
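    # Example (sketch): the full genotype matrix has one row per site and one
    # column per sample.
    #
    # >>> G = ts.genotype_matrix()
    # >>> G.shape == (ts.num_sites, ts.num_samples)
    # True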
def get_pairwise_diversity(self, samples=None):
# Deprecated alias for self.pairwise_diversity
return self.pairwise_diversity(samples)
def pairwise_diversity(self, samples=None):
"""
Returns the value of :math:`\pi`, the pairwise nucleotide site diversity,
        which is the average number of sites at which a randomly chosen
        pair of samples differ. If ``samples`` is specified, calculate the
diversity within this set.
.. note:: This method does not currently support sites that have more
than one mutation. Using it on such a tree sequence will raise
a LibraryError with an "Unsupported operation" message.
:param iterable samples: The set of samples within which we calculate
the diversity. If None, calculate diversity within the entire sample.
:return: The pairwise nucleotide site diversity.
:rtype: float
"""
if samples is None:
samples = self.samples()
return self._ll_tree_sequence.get_pairwise_diversity(list(samples))
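    # Example (sketch): diversity over all samples versus within a subset.
    #
    # >>> pi_all = ts.pairwise_diversity()
    # >>> pi_sub = ts.pairwise_diversity(samples=ts.samples()[:2])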
def node(self, id_):
"""
Returns the :ref:`node <sec_node_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`.Node`
"""
flags, time, population, metadata = self._ll_tree_sequence.get_node(id_)
return Node(
id_=id_, flags=flags, time=time, population=population, metadata=metadata)
def mutation(self, id_):
"""
Returns the :ref:`mutation <sec_mutation_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`.Mutation`
"""
ll_mut = self._ll_tree_sequence.get_mutation(id_)
return Mutation(
id_=id_, site=ll_mut[0], node=ll_mut[1], derived_state=ll_mut[2],
parent=ll_mut[3], metadata=ll_mut[4])
def site(self, id_):
"""
Returns the :ref:`site <sec_site_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`.Site`
"""
ll_site = self._ll_tree_sequence.get_site(id_)
pos, ancestral_state, ll_mutations, _, metadata = ll_site
mutations = [self.mutation(mut_id) for mut_id in ll_mutations]
return Site(
id_=id_, position=pos, ancestral_state=ancestral_state,
mutations=mutations, metadata=metadata)
def provenance(self, id_):
timestamp, record = self._ll_tree_sequence.get_provenance(id_)
return Provenance(id_=id_, timestamp=timestamp, record=record)
def get_samples(self, population_id=None):
# Deprecated alias for samples()
return self.samples(population_id)
def samples(self, population=None, population_id=None):
"""
Returns an array of the sample node IDs in this tree sequence. If the
``population`` parameter is specified, only return sample IDs from this
population.
:param int population: The population of interest. If None,
return all samples.
:param int population_id: Deprecated alias for ``population``.
:return: A numpy array of the node IDs for the samples of interest.
:rtype: numpy.ndarray (dtype=np.int32)
"""
if population is not None and population_id is not None:
raise ValueError(
"population_id and population are aliases. Cannot specify both")
if population_id is not None:
population = population_id
# TODO the low-level tree sequence should perform this operation natively
# and return a numpy array.
samples = self._ll_tree_sequence.get_samples()
if population is not None:
samples = [
u for u in samples if self.get_population(u) == population]
return np.array(samples, dtype=np.int32)
def write_vcf(self, output, ploidy=1, contig_id="1"):
"""
Writes a VCF formatted file to the specified file-like object. If a
ploidy value is supplied, allele values are combined among adjacent
samples to form a phased genotype of the required ploidy. For example,
if we have a ploidy of 2 and a sample of size 6, then we will have
3 diploid samples in the output, consisting of the combined alleles
for samples [0, 1], [2, 3] and [4, 5]. If we had alleles 011110 at
a particular variant, then we would output the genotypes 0|1, 1|1
and 1|0 in VCF. Sample names are generated by appending the index
to the prefix ``msp_`` such that we would have the sample names
``msp_0``, ``msp_1`` and ``msp_2`` in the running example.
Example usage:
>>> with open("output.vcf", "w") as vcf_file:
>>> tree_sequence.write_vcf(vcf_file, 2)
:param File output: The file-like object to write the VCF output.
:param int ploidy: The ploidy of the individual samples in the
            VCF. The sample size must be divisible by this ploidy.
:param str contig_id: The value of the CHROM column in the output VCF.
"""
if ploidy < 1:
raise ValueError("Ploidy must be >= sample size")
if self.get_sample_size() % ploidy != 0:
raise ValueError("Sample size must be divisible by ploidy")
converter = _msprime.VcfConverter(
self._ll_tree_sequence, ploidy=ploidy, contig_id=contig_id)
output.write(converter.get_header())
for record in converter:
output.write(record)
def simplify(self, samples=None, filter_zero_mutation_sites=True, map_nodes=False):
"""
Returns a simplified tree sequence that retains only the history of
the nodes given in the list ``samples``. If ``map_nodes`` is true,
also return a numpy array mapping the node IDs in this tree sequence to
        their node IDs in the simplified tree sequence. If a node ``u`` is not
present in the new tree sequence, the value of this mapping will be
NULL_NODE (-1).
In the returned tree sequence, the node with ID ``0`` corresponds to
``samples[0]``, node ``1`` corresponds to ``samples[1]``, and so on. Node
IDs in the returned tree sequence are then allocated sequentially
in time order. Note that this does **not** necessarily mean that nodes
in the returned tree sequence will be in strict time order (as we
may have internal or ancient samples).
If you wish to convert a set of tables that do not satisfy all
requirements for building a TreeSequence, then use
:func:`.simplify_tables()`.
:param list samples: The list of nodes for which to retain information. This
may be a numpy array (or array-like) object (dtype=np.int32).
:param bool filter_zero_mutation_sites: If True, remove any sites that have
no mutations in the simplified tree sequence. Defaults to True.
:param bool map_nodes: If True, return a tuple containing the resulting
tree sequence and a numpy array mapping node IDs in the current tree
sequence to their corresponding node IDs in the returned tree sequence.
If False (the default), return only the tree sequence object itself.
:return: The simplified tree sequence, or (if ``map_nodes`` is True)
a tuple containing the simplified tree sequence and a numpy array
mapping source node IDs to their corresponding IDs in the new tree
sequence.
:rtype: .TreeSequence or a (.TreeSequence, numpy.array) tuple
"""
check_numpy()
t = self.dump_tables()
if samples is None:
samples = self.get_samples()
node_map = tables.simplify_tables(
samples=samples, sequence_length=self.sequence_length,
nodes=t.nodes, edges=t.edges,
sites=t.sites, mutations=t.mutations,
filter_zero_mutation_sites=filter_zero_mutation_sites)
# TODO add simplify arguments here??
t.provenances.add_row(record=json.dumps(
provenance.get_provenance_dict("simplify", [])))
new_ts = load_tables(
nodes=t.nodes, edges=t.edges, migrations=t.migrations, sites=t.sites,
mutations=t.mutations, provenances=t.provenances,
sequence_length=self.sequence_length)
if map_nodes:
return new_ts, node_map
else:
return new_ts
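    # Example (sketch): restricting a tree sequence to the history of three samples
    # and recovering the node ID mapping.
    #
    # >>> small_ts, node_map = ts.simplify(samples=[0, 1, 2], map_nodes=True)
    # >>> node_map[0]   # new ID of old node 0; NULL_NODE (-1) if it was removed
    # 0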
############################################
#
# Deprecated APIs. These are either already unsupported, or will be unsupported in a
# later release.
#
############################################
def get_time(self, u):
# Deprecated. Use ts.node(u).time
if u < 0 or u >= self.get_num_nodes():
raise ValueError("ID out of bounds")
node = self.node(u)
return node.time
def get_population(self, u):
# Deprecated. Use ts.node(u).population
if u < 0 or u >= self.get_num_nodes():
raise ValueError("ID out of bounds")
node = self.node(u)
return node.population
def records(self):
# Deprecated. Use either ts.edges() or ts.edgesets().
t = [node.time for node in self.nodes()]
pop = [node.population for node in self.nodes()]
for e in self.edgesets():
yield CoalescenceRecord(
e.left, e.right, e.parent, e.children, t[e.parent], pop[e.parent])
# Unsupported old methods.
def get_num_records(self):
raise NotImplementedError(
"This method is no longer supported. Please use the "
"TreeSequence.num_edges if possible to work with edges rather "
"than coalescence records. If not, please use len(list(ts.edgesets())) "
"which should return the number of coalescence records, as previously "
"defined. Please open an issue on GitHub if this is "
"important for your workflow.")
def diffs(self):
raise NotImplementedError(
"This method is no longer supported. Please use the "
"TreeSequence.edge_diffs() method instead")
def newick_trees(self, precision=3, breakpoints=None, Ne=1):
raise NotImplementedError(
"This method is no longer supported. Please use the SparseTree.newick"
" method instead")
| ashander/msprime | msprime/trees.py | Python | gpl-3.0 | 89,405 | 0.001455 |
import pymongo
from flask import g
from flask import current_app as app
def get_db():
if not hasattr(g, 'conn'):
print(app.config)
g.conn = pymongo.MongoClient(
app.config['MONGODB_HOST'],
int(app.config['MONGODB_PORT'])
)
if not hasattr(g, 'db'):
g.db = g.conn[app.config['MONGODB_DB']]
return g.db
# todo
# @app.teardown_appcontext
# def teardown_db(exception):
# conn = getattr(g, 'conn', None)
# if conn is not None:
# conn.close()
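# A possible way to register the teardown above without needing the application
# object at import time is to expose an init function and call it from the app
# factory. This is a sketch only; ``init_app`` is not part of the existing module.
#
# def init_app(app):
#     @app.teardown_appcontext
#     def teardown_db(exception):
#         conn = getattr(g, 'conn', None)
#         if conn is not None:
#             conn.close()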
| patrykomiotek/seo-monitor-api | app/db.py | Python | mit | 524 | 0.003817 |