# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware vCenter VMDK driver.
"""
import re
import ddt
import mock
from oslo_utils import units
from oslo_utils import versionutils
from oslo_vmware import exceptions
from oslo_vmware import image_transfer
from oslo_vmware import vim_util
import six
from cinder import context
from cinder import exception as cinder_exceptions
from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume import configuration
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import volumeops
# TODO(vbala) Split test methods handling multiple cases into multiple methods,
# each handling a specific case.
@ddt.ddt
class VMwareVcVmdkDriverTestCase(test.TestCase):
"""Unit tests for VMwareVcVmdkDriver."""
IP = 'localhost'
PORT = 2321
USERNAME = 'username'
PASSWORD = 'password'
VOLUME_FOLDER = 'cinder-volumes'
API_RETRY_COUNT = 3
TASK_POLL_INTERVAL = 5.0
IMG_TX_TIMEOUT = 10
MAX_OBJECTS = 100
TMP_DIR = "/vmware-tmp"
CA_FILE = "/etc/ssl/rui-ca-cert.pem"
VMDK_DRIVER = vmdk.VMwareVcVmdkDriver
CLUSTERS = ["cls-1", "cls-2"]
DEFAULT_VC_VERSION = '5.5'
POOL_SIZE = 20
SNAPSHOT_FORMAT = 'COW'
VOL_ID = 'abcdefab-cdef-abcd-efab-cdefabcdefab'
SRC_VOL_ID = '9b3f6f1b-03a9-4f1e-aaff-ae15122b6ccf'
DISPLAY_NAME = 'foo'
VOL_TYPE_ID = 'd61b8cb3-aa1b-4c9b-b79e-abcdbda8b58a'
VOL_SIZE = 2
PROJECT_ID = 'd45beabe-f5de-47b7-b462-0d9ea02889bc'
SNAPSHOT_ID = '2f59670a-0355-4790-834c-563b65bba740'
SNAPSHOT_NAME = 'snap-foo'
SNAPSHOT_DESCRIPTION = 'test snapshot'
IMAGE_ID = 'eb87f4b0-d625-47f8-bb45-71c43b486d3a'
IMAGE_NAME = 'image-1'
ADAPTER_TYPE = volumeops.VirtualDiskAdapterType.BUS_LOGIC
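    # setUp wires the driver to a mocked Configuration and DB object; no
    # vCenter session is created, and individual tests patch volumeops,
    # session and ds_sel as needed.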
def setUp(self):
super(VMwareVcVmdkDriverTestCase, self).setUp()
self._config = mock.Mock(spec=configuration.Configuration)
self._config.vmware_host_ip = self.IP
self._config.vmware_host_port = self.PORT
self._config.vmware_host_username = self.USERNAME
self._config.vmware_host_password = self.PASSWORD
self._config.vmware_wsdl_location = None
self._config.vmware_volume_folder = self.VOLUME_FOLDER
self._config.vmware_api_retry_count = self.API_RETRY_COUNT
self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS
self._config.vmware_tmp_dir = self.TMP_DIR
self._config.vmware_ca_file = self.CA_FILE
self._config.vmware_insecure = False
self._config.vmware_cluster_name = self.CLUSTERS
self._config.vmware_host_version = self.DEFAULT_VC_VERSION
self._config.vmware_connection_pool_size = self.POOL_SIZE
self._config.vmware_adapter_type = self.ADAPTER_TYPE
self._config.vmware_snapshot_format = self.SNAPSHOT_FORMAT
self._config.vmware_lazy_create = True
self._config.vmware_datastore_regex = None
self._db = mock.Mock()
self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config,
db=self._db)
self._context = context.get_admin_context()
def test_get_volume_stats(self):
stats = self._driver.get_volume_stats()
self.assertEqual('VMware', stats['vendor_name'])
self.assertEqual(self._driver.VERSION, stats['driver_version'])
self.assertEqual('vmdk', stats['storage_protocol'])
self.assertEqual(0, stats['reserved_percentage'])
self.assertEqual('unknown', stats['total_capacity_gb'])
self.assertEqual('unknown', stats['free_capacity_gb'])
self.assertFalse(stats['shared_targets'])
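    # The helpers below build minimal volume/snapshot dicts and objects used
    # as fixtures throughout the tests; defaults come from the class-level
    # constants above.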
def _create_volume_dict(self,
vol_id=VOL_ID,
display_name=DISPLAY_NAME,
volume_type_id=VOL_TYPE_ID,
status='available',
size=VOL_SIZE,
attachment=None,
project_id=PROJECT_ID):
return {'id': vol_id,
'display_name': display_name,
'name': 'volume-%s' % vol_id,
'volume_type_id': volume_type_id,
'status': status,
'size': size,
'volume_attachment': attachment,
'project_id': project_id,
}
def _create_volume_obj(self,
vol_id=VOL_ID,
display_name=DISPLAY_NAME,
volume_type_id=VOL_TYPE_ID,
status='available',
size=VOL_SIZE,
attachment=None,
project_id=PROJECT_ID):
vol = self._create_volume_dict(
vol_id, display_name, volume_type_id, status, size, attachment,
project_id)
return fake_volume.fake_volume_obj(self._context, **vol)
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
@mock.patch.object(VMDK_DRIVER, '_get_adapter_type')
def test_verify_volume_creation(self, get_adapter_type, ds_sel,
get_storage_profile, get_disk_type):
profile_name = mock.sentinel.profile_name
get_storage_profile.return_value = profile_name
volume = self._create_volume_obj()
self._driver._verify_volume_creation(volume)
get_disk_type.assert_called_once_with(volume)
get_storage_profile.assert_called_once_with(volume)
ds_sel.get_profile_id.assert_called_once_with(profile_name)
get_adapter_type.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, '_verify_volume_creation')
def test_create_volume(self, verify_volume_creation):
volume = self._create_volume_dict()
self._driver.create_volume(volume)
verify_volume_creation.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, '_create_backing')
def test_create_volume_with_lazy_create_disabled(self, create_backing):
self._config.vmware_lazy_create = False
volume = self._create_volume_dict()
self._driver.create_volume(volume)
create_backing.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_volume_without_backing(self, vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
self._driver.delete_volume(volume)
vops.get_backing.assert_called_once_with(volume['name'], volume['id'])
self.assertFalse(vops.delete_backing.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_volume(self, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_dict()
self._driver.delete_volume(volume)
vops.get_backing.assert_called_once_with(volume['name'], volume['id'])
vops.delete_backing.assert_called_once_with(backing)
@mock.patch('cinder.volume.drivers.vmware.vmdk.'
'_get_volume_type_extra_spec')
@mock.patch('cinder.volume.drivers.vmware.volumeops.'
'VirtualDiskType.validate')
def test_get_extra_spec_disk_type(self, validate,
get_volume_type_extra_spec):
vmdk_type = mock.sentinel.vmdk_type
get_volume_type_extra_spec.return_value = vmdk_type
type_id = mock.sentinel.type_id
self.assertEqual(vmdk_type,
self._driver._get_extra_spec_disk_type(type_id))
get_volume_type_extra_spec.assert_called_once_with(
type_id, 'vmdk_type', default_value=vmdk.THIN_VMDK_TYPE)
validate.assert_called_once_with(vmdk_type)
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_disk_type')
def test_get_disk_type(self, get_extra_spec_disk_type):
vmdk_type = mock.sentinel.vmdk_type
get_extra_spec_disk_type.return_value = vmdk_type
volume = self._create_volume_dict()
self.assertEqual(vmdk_type, self._driver._get_disk_type(volume))
get_extra_spec_disk_type.assert_called_once_with(
volume['volume_type_id'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.'
'_get_volume_type_extra_spec')
@mock.patch('cinder.volume.drivers.vmware.volumeops.'
'VirtualDiskAdapterType.validate')
def test_get_extra_spec_adapter_type(
self, validate, get_volume_type_extra_spec):
adapter_type = mock.sentinel.adapter_type
get_volume_type_extra_spec.return_value = adapter_type
type_id = mock.sentinel.type_id
self.assertEqual(adapter_type,
self._driver._get_extra_spec_adapter_type(type_id))
get_volume_type_extra_spec.assert_called_once_with(
type_id, 'adapter_type',
default_value=self._driver.configuration.vmware_adapter_type)
validate.assert_called_once_with(adapter_type)
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_adapter_type')
def test_get_adapter_type(self, get_extra_spec_adapter_type):
adapter_type = mock.sentinel.adapter_type
get_extra_spec_adapter_type.return_value = adapter_type
volume = self._create_volume_dict()
self.assertEqual(adapter_type, self._driver._get_adapter_type(volume))
get_extra_spec_adapter_type.assert_called_once_with(
volume['volume_type_id'])
def _create_snapshot_dict(self,
volume,
snap_id=SNAPSHOT_ID,
name=SNAPSHOT_NAME,
description=SNAPSHOT_DESCRIPTION,
provider_location=None):
return {'id': snap_id,
'volume': volume,
'volume_name': volume['name'],
'name': name,
'display_description': description,
'volume_size': volume['size'],
'provider_location': provider_location
}
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
def test_get_snapshot_group_folder(self, get_volume_group_folder, vops):
dc = mock.sentinel.dc
vops.get_dc.return_value = dc
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
volume = self._create_volume_obj()
backing = mock.sentinel.backing
self.assertEqual(folder, self._driver._get_snapshot_group_folder(
volume, backing))
vops.get_dc.assert_called_once_with(backing)
get_volume_group_folder.assert_called_once_with(
dc, volume.project_id, snapshot=True)
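    # The 'template' snapshot format clones the volume backing (or a
    # temporary backing created from the attached VMDK when the volume is
    # in use) into the snapshot group folder and marks it as a template;
    # a failure to mark the clone rolls back by deleting it.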
@mock.patch.object(VMDK_DRIVER, '_get_snapshot_group_folder')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_in_use')
@mock.patch.object(VMDK_DRIVER, '_create_temp_backing_from_attached_vmdk')
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
def _test_create_snapshot_template_format(
self, delete_temp_backing, create_temp_backing_from_attached_vmdk,
in_use, vops, get_snapshot_group_folder, attached=False,
mark_as_template_error=False):
folder = mock.sentinel.folder
get_snapshot_group_folder.return_value = folder
datastore = mock.sentinel.datastore
vops.get_datastore.return_value = datastore
tmp_backing = mock.sentinel.tmp_backing
if attached:
in_use.return_value = True
create_temp_backing_from_attached_vmdk.return_value = tmp_backing
else:
in_use.return_value = False
vops.clone_backing.return_value = tmp_backing
if mark_as_template_error:
vops.mark_backing_as_template.side_effect = (
exceptions.VimException())
else:
inv_path = mock.sentinel.inv_path
vops.get_inventory_path.return_value = inv_path
volume = self._create_volume_obj()
snapshot = fake_snapshot.fake_snapshot_obj(
self._context, volume=volume)
backing = mock.sentinel.backing
if mark_as_template_error:
self.assertRaises(
exceptions.VimException,
self._driver._create_snapshot_template_format,
snapshot,
backing)
delete_temp_backing.assert_called_once_with(tmp_backing)
else:
exp_result = {'provider_location': inv_path}
self.assertEqual(exp_result,
self._driver._create_snapshot_template_format(
snapshot, backing))
delete_temp_backing.assert_not_called()
        get_snapshot_group_folder.assert_called_once_with(volume, backing)
vops.get_datastore.assert_called_once_with(backing)
in_use.assert_called_once_with(snapshot.volume)
if attached:
create_temp_backing_from_attached_vmdk.assert_called_once_with(
snapshot.volume, None, None, folder, datastore,
tmp_name=snapshot.name)
else:
vops.clone_backing.assert_called_once_with(
snapshot.name, backing, None, volumeops.FULL_CLONE_TYPE,
datastore, folder=folder)
vops.mark_backing_as_template.assert_called_once_with(tmp_backing)
def test_create_snapshot_template_format(self):
self._test_create_snapshot_template_format()
def test_create_snapshot_template_format_force(self):
self._test_create_snapshot_template_format(attached=True)
def test_create_snapshot_template_format_mark_template_error(self):
self._test_create_snapshot_template_format(mark_as_template_error=True)
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_snapshot_without_backing(self, vops, in_use):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
snapshot = self._create_snapshot_dict(volume)
ret = self._driver.create_snapshot(snapshot)
self.assertIsNone(ret)
vops.get_backing.assert_called_once_with(snapshot['volume_name'],
snapshot['volume']['id'])
self.assertFalse(vops.create_snapshot.called)
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_snapshot_with_backing(self, vops, in_use):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_dict()
snapshot = self._create_snapshot_dict(volume)
ret = self._driver.create_snapshot(snapshot)
self.assertIsNone(ret)
vops.get_backing.assert_called_once_with(snapshot['volume_name'],
snapshot['volume']['id'])
vops.create_snapshot.assert_called_once_with(
backing, snapshot['name'], snapshot['display_description'])
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
def test_create_snapshot_when_attached(self, in_use):
volume = self._create_volume_dict(status='in-use')
snapshot = self._create_snapshot_dict(volume)
self.assertRaises(cinder_exceptions.InvalidVolume,
self._driver.create_snapshot, snapshot)
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_create_snapshot_template_format')
def test_create_snapshot_template(
self, create_snapshot_template_format, vops, in_use):
self._driver.configuration.vmware_snapshot_format = 'template'
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
model_update = mock.sentinel.model_update
create_snapshot_template_format.return_value = model_update
volume = self._create_volume_dict()
snapshot = self._create_snapshot_dict(volume)
ret = self._driver.create_snapshot(snapshot)
self.assertEqual(model_update, ret)
vops.get_backing.assert_called_once_with(snapshot['volume_name'],
snapshot['volume']['id'])
create_snapshot_template_format.assert_called_once_with(
snapshot, backing)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_template_by_inv_path(self, vops):
template = mock.sentinel.template
vops.get_entity_by_inventory_path.return_value = template
inv_path = mock.sentinel.inv_path
self.assertEqual(template,
self._driver._get_template_by_inv_path(inv_path))
vops.get_entity_by_inventory_path.assert_called_once_with(inv_path)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_template_by_inv_path_invalid_path(self, vops):
vops.get_entity_by_inventory_path.return_value = None
inv_path = mock.sentinel.inv_path
self.assertRaises(vmdk_exceptions.TemplateNotFoundException,
self._driver._get_template_by_inv_path,
inv_path)
vops.get_entity_by_inventory_path.assert_called_once_with(inv_path)
@mock.patch.object(VMDK_DRIVER, '_get_template_by_inv_path')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_snapshot_template_format(
self, vops, get_template_by_inv_path):
template = mock.sentinel.template
get_template_by_inv_path.return_value = template
inv_path = '/dc-1/vm/foo'
volume = self._create_volume_dict()
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume,
provider_location=inv_path)
self._driver._delete_snapshot_template_format(snapshot)
get_template_by_inv_path.assert_called_once_with(inv_path)
vops.delete_backing.assert_called_once_with(template)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_snapshot_without_backing(self, vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume)
self._driver.delete_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot.volume_name,
snapshot.volume.id)
vops.get_snapshot.assert_not_called()
vops.delete_snapshot.assert_not_called()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
def test_delete_snapshot_with_backing(self, in_use, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_dict(status='deleting')
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume)
self._driver.delete_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot.volume_name,
snapshot.volume.id)
vops.get_snapshot.assert_called_once_with(backing, snapshot.name)
in_use.assert_called_once_with(snapshot.volume)
vops.delete_snapshot.assert_called_once_with(
backing, snapshot.name)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
def test_delete_snapshot_when_attached(self, in_use, vops):
volume = self._create_volume_dict(status='in-use')
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume)
self.assertRaises(cinder_exceptions.InvalidSnapshot,
self._driver.delete_snapshot, snapshot)
in_use.assert_called_once_with(snapshot.volume)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_snapshot_without_backend_snapshot(self, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
vops.get_snapshot.return_value = None
volume = self._create_volume_dict(status='in-use')
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume)
self._driver.delete_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot.volume_name,
snapshot.volume.id)
vops.get_snapshot.assert_called_once_with(backing, snapshot.name)
vops.delete_snapshot.assert_not_called()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
@mock.patch.object(VMDK_DRIVER, '_delete_snapshot_template_format')
def test_delete_snapshot_template(
self, delete_snapshot_template_format, in_use, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
inv_path = '/dc-1/vm/foo'
volume = self._create_volume_dict(status='deleting')
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume,
provider_location=inv_path)
self._driver.delete_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot.volume_name,
snapshot.volume.id)
vops.get_snapshot.assert_not_called()
in_use.assert_called_once_with(snapshot.volume)
delete_snapshot_template_format.assert_called_once_with(snapshot)
@ddt.data('vmdk', 'VMDK', None)
def test_validate_disk_format(self, disk_format):
self._driver._validate_disk_format(disk_format)
def test_validate_disk_format_with_invalid_format(self):
self.assertRaises(cinder_exceptions.ImageUnacceptable,
self._driver._validate_disk_format,
'img')
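    # Minimal Glance image metadata fixture; the vmware_disktype and
    # vmware_adaptertype properties select the copy-image code path under
    # test.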
def _create_image_meta(self,
_id=IMAGE_ID,
name=IMAGE_NAME,
disk_format='vmdk',
size=1 * units.Gi,
container_format='bare',
vmware_disktype='streamOptimized',
vmware_adaptertype='lsiLogic',
is_public=True):
return {'id': _id,
'name': name,
'disk_format': disk_format,
'size': size,
'container_format': container_format,
'properties': {'vmware_disktype': vmware_disktype,
'vmware_adaptertype': vmware_adaptertype,
},
'is_public': is_public,
}
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_validate_disk_format')
def test_copy_image_to_volume_with_invalid_container(self,
validate_disk_format):
image_service = mock.Mock()
image_meta = self._create_image_meta(container_format='ami')
image_service.show.return_value = image_meta
context = mock.sentinel.context
volume = self._create_volume_dict()
image_id = mock.sentinel.image_id
self.assertRaises(
cinder_exceptions.ImageUnacceptable,
self._driver.copy_image_to_volume, context, volume, image_service,
image_id)
validate_disk_format.assert_called_once_with(image_meta['disk_format'])
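    # Parameterized copy_image_to_volume scenarios: streamOptimized images
    # are fetched directly into the backing, other disk types go through
    # _create_volume_from_non_stream_optimized_image, and the backing is
    # extended when the image's virtual disk is smaller than the volume.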
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_validate_disk_format')
@mock.patch.object(VMDK_DRIVER, '_get_adapter_type',
return_value=volumeops.VirtualDiskAdapterType.BUS_LOGIC)
@mock.patch('cinder.volume.drivers.vmware.volumeops.'
'VirtualDiskAdapterType.validate')
@mock.patch('cinder.volume.drivers.vmware.vmdk.ImageDiskType.'
'validate')
@mock.patch.object(VMDK_DRIVER,
'_create_volume_from_non_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER,
'_fetch_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
def _test_copy_image_to_volume(self,
extend_backing,
vops,
fetch_stream_optimized_image,
create_volume_from_non_stream_opt_image,
validate_image_disk_type,
validate_image_adapter_type,
get_adapter_type,
validate_disk_format,
get_disk_type,
vmware_disk_type='streamOptimized',
backing_disk_size=VOL_SIZE,
call_extend_backing=False,
container_format='bare'):
image_service = mock.Mock()
image_meta = self._create_image_meta(vmware_disktype=vmware_disk_type,
container_format=container_format)
image_service.show.return_value = image_meta
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
vops.get_disk_size.return_value = backing_disk_size * units.Gi
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
context = mock.sentinel.context
volume = self._create_volume_dict()
image_id = mock.sentinel.image_id
self._driver.copy_image_to_volume(
context, volume, image_service, image_id)
validate_disk_format.assert_called_once_with(image_meta['disk_format'])
validate_image_disk_type.assert_called_once_with(
image_meta['properties']['vmware_disktype'])
validate_image_adapter_type.assert_called_once_with(
image_meta['properties']['vmware_adaptertype'])
if vmware_disk_type == 'streamOptimized':
fetch_stream_optimized_image.assert_called_once_with(
context, volume, image_service, image_id, image_meta['size'],
image_meta['properties']['vmware_adaptertype'])
else:
create_volume_from_non_stream_opt_image.assert_called_once_with(
context, volume, image_service, image_id, image_meta['size'],
image_meta['properties']['vmware_adaptertype'],
image_meta['properties']['vmware_disktype'])
vops.get_disk_size.assert_called_once_with(backing)
if call_extend_backing:
extend_backing.assert_called_once_with(backing, volume['size'],
disk_type)
else:
self.assertFalse(extend_backing.called)
@ddt.data('sparse', 'preallocated', 'streamOptimized')
def test_copy_image_to_volume(self, vmware_disk_type):
self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type)
@ddt.data('sparse', 'preallocated', 'streamOptimized')
def test_copy_image_to_volume_with_extend_backing(self, vmware_disk_type):
self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type,
backing_disk_size=1,
call_extend_backing=True)
def test_copy_image_to_volume_with_ova_container(self):
self._test_copy_image_to_volume(container_format='ova')
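    # For non streamOptimized images a disk-less backing is created first;
    # when disk-type conversion is needed the disk is attached to a temporary
    # backing which is cloned with the requested disk type and then deleted.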
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_check_disk_conversion')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch.object(VMDK_DRIVER,
'_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
def _test_create_volume_from_non_stream_optimized_image(
self,
delete_tmp_backing,
select_ds_for_volume,
get_storage_profile_id,
create_disk_from_preallocated_image,
create_disk_from_sparse_image,
vops,
get_ds_name_folder_path,
create_backing,
generate_uuid,
check_disk_conversion,
get_disk_type,
image_disk_type='sparse',
disk_conversion=False):
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
check_disk_conversion.return_value = disk_conversion
volume = self._create_volume_dict()
if disk_conversion:
disk_name = "6b77b25a-9136-470e-899e-3c930e570d8e"
generate_uuid.return_value = disk_name
else:
disk_name = volume['name']
backing = mock.sentinel.backing
create_backing.return_value = backing
ds_name = mock.sentinel.ds_name
folder_path = mock.sentinel.folder_path
get_ds_name_folder_path.return_value = (ds_name, folder_path)
host = mock.sentinel.host
dc_ref = mock.sentinel.dc_ref
vops.get_host.return_value = host
vops.get_dc.return_value = dc_ref
vmdk_path = mock.Mock(spec=volumeops.FlatExtentVirtualDiskPath)
create_disk_from_sparse_image.return_value = vmdk_path
create_disk_from_preallocated_image.return_value = vmdk_path
profile_id = mock.sentinel.profile_id
get_storage_profile_id.return_value = profile_id
if disk_conversion:
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
select_ds_for_volume.return_value = (host, rp, folder, summary)
clone = mock.sentinel.clone
vops.clone_backing.return_value = clone
context = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size_in_bytes = units.Gi
adapter_type = mock.sentinel.adapter_type
self._driver._create_volume_from_non_stream_optimized_image(
context, volume, image_service, image_id, image_size_in_bytes,
adapter_type, image_disk_type)
check_disk_conversion.assert_called_once_with(image_disk_type,
mock.sentinel.disk_type)
if disk_conversion:
create_backing.assert_called_once_with(
volume,
create_params={vmdk.CREATE_PARAM_DISK_LESS: True,
vmdk.CREATE_PARAM_BACKING_NAME: disk_name,
vmdk.CREATE_PARAM_TEMP_BACKING: True})
else:
create_backing.assert_called_once_with(
volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True})
if image_disk_type == 'sparse':
create_disk_from_sparse_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, disk_name)
else:
create_disk_from_preallocated_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, disk_name, adapter_type)
get_storage_profile_id.assert_called_once_with(volume)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, profile_id, vmdk_path.get_descriptor_ds_file_path())
if disk_conversion:
select_ds_for_volume.assert_called_once_with(volume)
extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'],
volumeops.BACKING_UUID_KEY: volume['id']}
vops.clone_backing.assert_called_once_with(
volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
datastore, disk_type=disk_type, host=host, resource_pool=rp,
extra_config=extra_config, folder=folder)
delete_tmp_backing.assert_called_once_with(backing)
            vops.update_backing_disk_uuid.assert_called_once_with(
                clone, volume['id'])
        else:
            vops.update_backing_disk_uuid.assert_called_once_with(
                backing, volume['id'])
@ddt.data('sparse', 'preallocated')
def test_create_volume_from_non_stream_optimized_image(self,
image_disk_type):
self._test_create_volume_from_non_stream_optimized_image(
image_disk_type=image_disk_type)
@ddt.data('sparse', 'preallocated')
def test_create_volume_from_non_stream_opt_image_with_disk_conversion(
self, image_disk_type):
self._test_create_volume_from_non_stream_optimized_image(
image_disk_type=image_disk_type, disk_conversion=True)
def _test_get_vsphere_url(self, direct_url, exp_vsphere_url=None):
image_service = mock.Mock()
image_service.get_location.return_value = (direct_url, [])
context = mock.sentinel.context
image_id = mock.sentinel.image_id
ret = self._driver._get_vsphere_url(context, image_service, image_id)
self.assertEqual(exp_vsphere_url, ret)
image_service.get_location.assert_called_once_with(context, image_id)
def test_get_vsphere_url(self):
url = "vsphere://foo/folder/glance/img_uuid?dcPath=dc1&dsName=ds1"
self._test_get_vsphere_url(url, exp_vsphere_url=url)
    def test_get_vsphere_url_with_http_url(self):
url = "http://foo/folder/glance/img_uuid?dcPath=dc1&dsName=ds1"
self._test_get_vsphere_url(url)
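    # Preallocated images: if Glance exposes a vsphere:// location, the
    # datastore file is copied server-side; otherwise the image is streamed
    # into a temporary flat-extent disk and copied to the destination path.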
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_get_vsphere_url')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def _test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, get_vsphere_url, flat_extent_path,
generate_uuid, get_temp_image_folder, copy_temp_virtual_disk,
vsphere_url=None):
dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
ds_name = mock.sentinel.ds_name
folder_path = mock.sentinel.folder_path
get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
uuid = mock.sentinel.uuid
generate_uuid.return_value = uuid
path = mock.Mock()
dest_path = mock.Mock()
flat_extent_path.side_effect = [path, dest_path]
get_vsphere_url.return_value = vsphere_url
context = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size_in_bytes = 2 * units.Gi
dest_dc_ref = mock.sentinel.dest_dc_ref
dest_ds_name = mock.sentinel.dest_ds_name
dest_folder_path = mock.sentinel.dest_folder_path
dest_disk_name = mock.sentinel.dest_disk_name
adapter_type = mock.sentinel.adapter_type
ret = self._driver._create_virtual_disk_from_preallocated_image(
context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
exp_flat_extent_path_calls = [
mock.call(ds_name, folder_path, uuid),
mock.call(dest_ds_name, dest_folder_path, dest_disk_name)]
self.assertEqual(exp_flat_extent_path_calls,
flat_extent_path.call_args_list)
create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
create_descriptor.assert_called_once_with(
dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
get_vsphere_url.assert_called_once_with(
context, image_service, image_id)
if vsphere_url:
vops.copy_datastore_file.assert_called_once_with(
vsphere_url, dc_ref, path.get_flat_extent_ds_file_path())
else:
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, path.get_flat_extent_file_path())
copy_temp_virtual_disk.assert_called_once_with(dc_ref, path,
dest_dc_ref, dest_path)
self.assertEqual(dest_path, ret)
def test_create_virtual_disk_from_preallocated_image(self):
self._test_create_virtual_disk_from_preallocated_image()
def test_create_virtual_disk_from_preallocated_image_on_vsphere(self):
self._test_create_virtual_disk_from_preallocated_image(
vsphere_url=mock.sentinel.vsphere_url)
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_get_vsphere_url', return_value=None)
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy(
self, vops, copy_image, get_vsphere_url, flat_extent_path,
get_temp_image_folder, copy_temp_virtual_disk):
dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
ds_name = mock.sentinel.ds_name
folder_path = mock.sentinel.folder_path
get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
path = mock.Mock()
flat_extent_path.return_value = path
context = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size_in_bytes = 2 * units.Gi
dest_dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
dest_ds_name = ds_name
dest_folder_path = mock.sentinel.dest_folder_path
dest_disk_name = mock.sentinel.dest_disk_name
adapter_type = mock.sentinel.adapter_type
ret = self._driver._create_virtual_disk_from_preallocated_image(
context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
flat_extent_path.assert_called_once_with(
dest_ds_name, dest_folder_path, dest_disk_name)
create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
create_descriptor.assert_called_once_with(
dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, path.get_flat_extent_file_path())
self.assertFalse(copy_temp_virtual_disk.called)
self.assertEqual(path, ret)
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_get_vsphere_url', return_value=None)
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image_with_copy_error(
self, vops, copy_image, get_vsphere_url, flat_extent_path,
generate_uuid, get_temp_image_folder, copy_temp_virtual_disk):
dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
ds_name = mock.sentinel.ds_name
folder_path = mock.sentinel.folder_path
get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
uuid = mock.sentinel.uuid
generate_uuid.return_value = uuid
path = mock.Mock()
dest_path = mock.Mock()
flat_extent_path.side_effect = [path, dest_path]
copy_image.side_effect = exceptions.VimException("error")
context = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size_in_bytes = 2 * units.Gi
dest_dc_ref = mock.sentinel.dest_dc_ref
dest_ds_name = mock.sentinel.dest_ds_name
dest_folder_path = mock.sentinel.dest_folder_path
dest_disk_name = mock.sentinel.dest_disk_name
adapter_type = mock.sentinel.adapter_type
self.assertRaises(
exceptions.VimException,
self._driver._create_virtual_disk_from_preallocated_image,
context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
vops.delete_file.assert_called_once_with(
path.get_descriptor_ds_file_path(), dc_ref)
self.assertFalse(copy_temp_virtual_disk.called)
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.'
'MonolithicSparseVirtualDiskPath')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_vsphere_url')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def _test_create_virtual_disk_from_sparse_image(
self, vops, copy_image, get_vsphere_url, copy_temp_virtual_disk,
flat_extent_path, sparse_path, generate_uuid, vsphere_url=None):
uuid = mock.sentinel.uuid
generate_uuid.return_value = uuid
src_path = mock.Mock()
sparse_path.return_value = src_path
dest_path = mock.Mock()
flat_extent_path.return_value = dest_path
get_vsphere_url.return_value = vsphere_url
context = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size_in_bytes = 2 * units.Gi
dc_ref = mock.sentinel.dc_ref
ds_name = mock.sentinel.ds_name
folder_path = mock.sentinel.folder_path
disk_name = mock.sentinel.disk_name
ret = self._driver._create_virtual_disk_from_sparse_image(
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name)
sparse_path.assert_called_once_with(ds_name, folder_path, uuid)
get_vsphere_url.assert_called_once_with(
context, image_service, image_id)
if vsphere_url:
vops.copy_datastore_file.assert_called_once_with(
vsphere_url, dc_ref, src_path.get_descriptor_ds_file_path())
else:
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, src_path.get_descriptor_file_path())
flat_extent_path.assert_called_once_with(
ds_name, folder_path, disk_name)
copy_temp_virtual_disk.assert_called_once_with(
dc_ref, src_path, dc_ref, dest_path)
self.assertEqual(dest_path, ret)
def test_create_virtual_disk_from_sparse_image(self):
self._test_create_virtual_disk_from_sparse_image()
def test_create_virtual_disk_from_sparse_image_on_vsphere(self):
self._test_create_virtual_disk_from_sparse_image(
vsphere_url=mock.sentinel.vsphere_url)
@mock.patch.object(VMDK_DRIVER, '_select_datastore')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_temp_image_folder(self, vops, select_datastore):
host = mock.sentinel.host
resource_pool = mock.sentinel.rp
summary = mock.Mock()
ds_name = mock.sentinel.ds_name
summary.name = ds_name
select_datastore.return_value = (host, resource_pool, summary)
dc = mock.sentinel.dc
vops.get_dc.return_value = dc
image_size = 2 * units.Gi
ret = self._driver._get_temp_image_folder(image_size)
self.assertEqual((dc, ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH),
ret)
exp_req = {
hub.DatastoreSelector.SIZE_BYTES: image_size,
hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE:
{hub.DatastoreType.VMFS,
hub.DatastoreType.NFS,
hub.DatastoreType.NFS41}}
select_datastore.assert_called_once_with(exp_req)
vops.create_datastore_folder.assert_called_once_with(
ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH, dc)
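    # streamOptimized images are imported with oslo.vmware's
    # download_stream_optimized_image using an ImportSpec built from the
    # backing create spec; a download failure deletes the partial backing.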
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_extra_config')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(image_transfer, 'download_stream_optimized_image')
def _test_copy_image_to_volume_stream_optimized(self,
download_image,
session,
vops,
get_extra_config,
get_disk_type,
get_profile_id,
select_ds_for_volume,
download_error=False):
host = mock.sentinel.host
rp = mock.sentinel.rp
folder = mock.sentinel.folder
# NOTE(mriedem): The summary.name gets logged so it has to be a string
summary = mock.Mock(name=six.text_type(mock.sentinel.ds_name))
select_ds_for_volume.return_value = (host, rp, folder, summary)
profile_id = mock.sentinel.profile_id
get_profile_id.return_value = profile_id
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
extra_config = mock.sentinel.extra_config
get_extra_config.return_value = extra_config
vm_create_spec = mock.sentinel.vm_create_spec
vops.get_create_spec.return_value = vm_create_spec
import_spec = mock.Mock()
session.vim.client.factory.create.return_value = import_spec
backing = mock.sentinel.backing
if download_error:
download_image.side_effect = exceptions.VimException
vops.get_backing.return_value = backing
else:
download_image.return_value = backing
context = mock.sentinel.context
volume = self._create_volume_dict(size=3)
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size = 2 * units.Gi
adapter_type = mock.sentinel.adapter_type
if download_error:
self.assertRaises(
exceptions.VimException,
self._driver._fetch_stream_optimized_image,
context, volume, image_service, image_id,
image_size, adapter_type)
else:
self._driver._fetch_stream_optimized_image(
context, volume, image_service, image_id, image_size,
adapter_type)
select_ds_for_volume.assert_called_once_with(volume)
vops.get_create_spec.assert_called_once_with(
volume['name'], 0, disk_type, summary.name, profile_id=profile_id,
adapter_type=adapter_type, extra_config=extra_config)
self.assertEqual(vm_create_spec, import_spec.configSpec)
download_image.assert_called_with(
context,
self._config.vmware_image_transfer_timeout_secs,
image_service,
image_id,
session=session,
host=self._config.vmware_host_ip,
port=self._config.vmware_host_port,
resource_pool=rp,
vm_folder=folder,
vm_import_spec=import_spec,
image_size=image_size,
http_method='POST')
if download_error:
self.assertFalse(vops.update_backing_disk_uuid.called)
vops.delete_backing.assert_called_once_with(backing)
else:
vops.update_backing_disk_uuid.assert_called_once_with(
backing, volume['id'])
def test_copy_image_to_volume_stream_optimized(self):
self._test_copy_image_to_volume_stream_optimized()
def test_copy_image_to_volume_stream_optimized_with_download_error(self):
self._test_copy_image_to_volume_stream_optimized(download_error=True)
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
def test_copy_volume_to_image_when_attached(self, in_use):
volume = self._create_volume_dict(
status="uploading",
attachment=[mock.sentinel.attachment_1])
self.assertRaises(
cinder_exceptions.InvalidVolume,
self._driver.copy_volume_to_image,
mock.sentinel.context,
volume,
mock.sentinel.image_service,
mock.sentinel.image_meta)
in_use.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, '_validate_disk_format')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch('oslo_vmware.image_transfer.upload_image')
@mock.patch.object(VMDK_DRIVER, 'session')
def _test_copy_volume_to_image(
self, session, upload_image, create_backing, vops,
validate_disk_format, backing_exists=True):
backing = mock.sentinel.backing
if backing_exists:
vops.get_backing.return_value = backing
else:
vops.get_backing.return_value = None
create_backing.return_value = backing
vmdk_file_path = mock.sentinel.vmdk_file_path
vops.get_vmdk_path.return_value = vmdk_file_path
context = mock.sentinel.context
volume = self._create_volume_dict()
image_service = mock.sentinel.image_service
image_meta = self._create_image_meta()
self._driver.copy_volume_to_image(
context, volume, image_service, image_meta)
validate_disk_format.assert_called_once_with(image_meta['disk_format'])
vops.get_backing.assert_called_once_with(volume['name'], volume['id'])
if not backing_exists:
create_backing.assert_called_once_with(volume)
vops.get_vmdk_path.assert_called_once_with(backing)
upload_image.assert_called_once_with(
context,
self._config.vmware_image_transfer_timeout_secs,
image_service,
image_meta['id'],
volume['project_id'],
session=session,
host=self._config.vmware_host_ip,
port=self._config.vmware_host_port,
vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=volume['size'] * units.Gi,
image_name=image_meta['name'],
image_version=1)
def test_copy_volume_to_image(self):
self._test_copy_volume_to_image()
def test_copy_volume_to_image_with_no_backing(self):
self._test_copy_volume_to_image(backing_exists=False)
def test_in_use(self):
volume = self._create_volume_dict(
attachment=[mock.sentinel.attachment_1])
self.assertTrue(self._driver._in_use(volume))
def test_in_use_with_available_volume(self):
volume = self._create_volume_dict()
self.assertIsNone(self._driver._in_use(volume))
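    # retype() scenarios: in-use volumes are rejected, a missing backing
    # succeeds trivially, and storage-profile/disk-type/adapter-type changes
    # drive datastore re-selection, relocation or a full clone of the backing.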
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
def test_retype_with_in_use_volume(self, in_use):
context = mock.sentinel.context
volume = self._create_volume_dict(
status='retyping', attachment=[mock.sentinel.attachment_1])
new_type = mock.sentinel.new_type
diff = mock.sentinel.diff
host = mock.sentinel.host
self.assertFalse(self._driver.retype(context, volume, new_type, diff,
host))
in_use.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_retype_with_no_volume_backing(self, vops, in_use):
vops.get_backing.return_value = None
context = mock.sentinel.context
volume = self._create_volume_dict(status='retyping')
new_type = mock.sentinel.new_type
diff = mock.sentinel.diff
host = mock.sentinel.host
self.assertTrue(self._driver.retype(context, volume, new_type, diff,
host))
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_extra_spec_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
@mock.patch.object(VMDK_DRIVER, '_select_datastore')
@mock.patch.object(
VMDK_DRIVER, '_get_adapter_type', return_value='lsiLogic')
@mock.patch.object(
VMDK_DRIVER, '_get_extra_spec_adapter_type', return_value='lsiLogic')
def test_retype_with_diff_profile_and_ds_compliance(
self,
_get_extra_spec_adapter_type,
_get_adapter_type,
select_datastore,
ds_sel,
get_extra_spec_storage_profile,
get_storage_profile,
get_extra_spec_disk_type,
get_disk_type,
vops,
in_use):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
datastore = mock.Mock(value='ds1')
vops.get_datastore.return_value = datastore
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
get_extra_spec_disk_type.return_value = disk_type
self._driver._storage_policy_enabled = True
profile = 'gold'
get_storage_profile.return_value = profile
new_profile = 'silver'
get_extra_spec_storage_profile.return_value = new_profile
ds_sel.is_datastore_compliant.return_value = True
new_profile_id = mock.sentinel.new_profile_id
ds_sel.get_profile_id.return_value = new_profile_id
context = mock.sentinel.context
volume = self._create_volume_dict(status='retyping')
new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
diff = mock.sentinel.diff
host = mock.sentinel.host
self.assertTrue(self._driver.retype(context, volume, new_type, diff,
host))
ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
new_profile)
select_datastore.assert_not_called()
vops.change_backing_profile.assert_called_once_with(backing,
new_profile_id)
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_extra_spec_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
@mock.patch.object(VMDK_DRIVER, '_select_datastore')
def test_retype_with_diff_profile_and_ds_sel_no_candidate(
self, select_datastore, ds_sel, get_extra_spec_storage_profile,
get_storage_profile, get_extra_spec_disk_type, get_disk_type,
vops, in_use):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
datastore = mock.Mock(value='ds1')
vops.get_datastore.return_value = datastore
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
get_extra_spec_disk_type.return_value = disk_type
vops.snapshot_exists.return_value = False
self._driver._storage_policy_enabled = True
profile = 'gold'
get_storage_profile.return_value = profile
new_profile = 'silver'
get_extra_spec_storage_profile.return_value = new_profile
ds_sel.is_datastore_compliant.return_value = False
select_datastore.side_effect = (
vmdk_exceptions.NoValidDatastoreException)
context = mock.sentinel.context
volume = self._create_volume_dict(status='retyping')
new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
diff = mock.sentinel.diff
host = mock.sentinel.host
self.assertFalse(self._driver.retype(context, volume, new_type, diff,
host))
ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
new_profile)
select_datastore.assert_called_once_with(
{hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
hub.DatastoreSelector.PROFILE_NAME: new_profile})
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_extra_spec_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
@mock.patch.object(VMDK_DRIVER, '_select_datastore')
@mock.patch.object(VMDK_DRIVER, '_get_dc')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch.object(
VMDK_DRIVER, '_get_adapter_type', return_value='lsiLogic')
@mock.patch.object(
VMDK_DRIVER, '_get_extra_spec_adapter_type', return_value='lsiLogic')
def test_retype_with_diff_extra_spec_and_vol_snapshot(
self,
get_extra_spec_adapter_type,
get_adapter_type,
get_volume_group_folder,
get_dc,
select_datastore,
ds_sel, get_extra_spec_storage_profile,
get_storage_profile,
get_extra_spec_disk_type,
get_disk_type,
vops,
in_use):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
datastore = mock.Mock(value='ds1')
vops.get_datastore.return_value = datastore
get_disk_type.return_value = 'thin'
new_disk_type = 'thick'
get_extra_spec_disk_type.return_value = new_disk_type
vops.snapshot_exists.return_value = True
self._driver._storage_policy_enabled = True
profile = 'gold'
get_storage_profile.return_value = profile
new_profile = 'silver'
get_extra_spec_storage_profile.return_value = new_profile
ds_sel.is_datastore_compliant.return_value = False
host = mock.sentinel.host
rp = mock.sentinel.rp
new_datastore = mock.Mock(value='ds2')
summary = mock.Mock(datastore=new_datastore)
select_datastore.return_value = (host, rp, summary)
dc = mock.sentinel.dc
get_dc.return_value = dc
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
new_profile_id = mock.sentinel.new_profile_id
ds_sel.get_profile_id.return_value = new_profile_id
context = mock.sentinel.context
volume = self._create_volume_dict(status='retyping')
new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
diff = mock.sentinel.diff
host = mock.sentinel.host
self.assertTrue(self._driver.retype(context, volume, new_type, diff,
host))
ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
new_profile)
select_datastore.assert_called_once_with(
{hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: ['ds1'],
hub.DatastoreSelector.PROFILE_NAME: new_profile})
get_dc.assert_called_once_with(rp)
get_volume_group_folder.assert_called_once_with(dc,
volume['project_id'])
vops.relocate_backing.assert_called_once_with(
backing, new_datastore, rp, host, new_disk_type)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
vops.change_backing_profile.assert_called_once_with(backing,
new_profile_id)
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_extra_spec_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
@mock.patch.object(VMDK_DRIVER, '_select_datastore')
@mock.patch.object(VMDK_DRIVER, '_get_dc')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, '_get_adapter_type')
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_adapter_type')
def _test_retype_with_diff_extra_spec_and_ds_compliance(
self,
get_extra_spec_adapter_type,
get_adapter_type,
delete_temp_backing,
generate_uuid,
get_volume_group_folder,
get_dc,
select_datastore,
ds_sel,
get_extra_spec_storage_profile,
get_storage_profile,
get_extra_spec_disk_type,
get_disk_type,
vops,
in_use,
clone_error=False):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
datastore = mock.Mock(value='ds1')
vops.get_datastore.return_value = datastore
get_disk_type.return_value = 'thin'
new_disk_type = 'thick'
get_extra_spec_disk_type.return_value = new_disk_type
vops.snapshot_exists.return_value = False
self._driver._storage_policy_enabled = True
profile = 'gold'
get_storage_profile.return_value = profile
new_profile = 'silver'
get_extra_spec_storage_profile.return_value = new_profile
ds_sel.is_datastore_compliant.return_value = True
host = mock.sentinel.host
rp = mock.sentinel.rp
summary = mock.Mock(datastore=datastore)
select_datastore.return_value = (host, rp, summary)
dc = mock.sentinel.dc
get_dc.return_value = dc
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
new_profile_id = mock.sentinel.new_profile_id
ds_sel.get_profile_id.return_value = new_profile_id
uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
generate_uuid.return_value = uuid
if clone_error:
vops.clone_backing.side_effect = exceptions.VimException
else:
new_backing = mock.sentinel.new_backing
vops.clone_backing.return_value = new_backing
adapter_type = 'lsiLogic'
get_adapter_type.return_value = adapter_type
new_adapter_type = 'paraVirtual'
get_extra_spec_adapter_type.return_value = new_adapter_type
capacity = self.VOL_SIZE * units.Mi
filename = mock.sentinel.filename
        disk_backing = mock.Mock(fileName=filename)
disk_device = mock.Mock(capacityInKB=capacity, backing=disk_backing)
vops._get_disk_device.return_value = disk_device
context = mock.sentinel.context
volume = self._create_volume_dict(status='retyping')
new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
diff = mock.sentinel.diff
host = mock.sentinel.host
if clone_error:
self.assertRaises(exceptions.VimException, self._driver.retype,
context, volume, new_type, diff, host)
else:
self.assertTrue(self._driver.retype(context, volume, new_type,
diff, host))
ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
new_profile)
select_datastore.assert_called_once_with(
{hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
hub.DatastoreSelector.PROFILE_NAME: new_profile})
get_dc.assert_called_once_with(rp)
get_volume_group_folder.assert_called_once_with(dc,
volume['project_id'])
vops.clone_backing.assert_called_once_with(
volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
datastore, disk_type=new_disk_type, host=host, resource_pool=rp,
folder=folder)
if clone_error:
exp_rename_calls = [mock.call(backing, uuid),
mock.call(backing, volume['name'])]
self.assertEqual(exp_rename_calls,
vops.rename_backing.call_args_list)
else:
vops.rename_backing.assert_called_once_with(backing, uuid)
vops.update_backing_uuid.assert_called_once_with(
new_backing, volume['id'])
vops.update_backing_disk_uuid.assert_called_once_with(
new_backing, volume['id'])
delete_temp_backing.assert_called_once_with(backing)
vops.detach_disk_from_backing.assert_called_once_with(
new_backing, disk_device)
vops.attach_disk_to_backing.assert_called_once_with(
new_backing, disk_device.capacityInKB, new_disk_type,
new_adapter_type, None, disk_device.backing.fileName)
vops.change_backing_profile.assert_called_once_with(new_backing,
new_profile_id)
def test_retype_with_diff_extra_spec_and_ds_compliance(self):
self._test_retype_with_diff_extra_spec_and_ds_compliance()
def test_retype_with_diff_extra_spec_ds_compliance_and_clone_error(self):
self._test_retype_with_diff_extra_spec_and_ds_compliance(
clone_error=True)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_backing(self, vops):
vmdk_path = mock.sentinel.vmdk_path
vops.get_vmdk_path.return_value = vmdk_path
dc = mock.sentinel.datacenter
vops.get_dc.return_value = dc
disk_type = mock.sentinel.disk_type
eager_zero = (disk_type == "eagerZeroedThick")
backing = mock.sentinel.backing
new_size = 1
self._driver._extend_backing(backing, new_size, disk_type)
vops.get_vmdk_path.assert_called_once_with(backing)
vops.get_dc.assert_called_once_with(backing)
vops.extend_virtual_disk.assert_called_once_with(new_size,
vmdk_path,
dc,
eager_zero)
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch('oslo_vmware.vim_util.get_vc_version')
def test_get_vc_version(self, get_vc_version, session):
self._driver.configuration.vmware_host_version = None
version_str = '6.0.0'
get_vc_version.return_value = version_str
version = self._driver._get_vc_version()
self.assertEqual(version_str, version)
get_vc_version.assert_called_once_with(session)
@mock.patch('oslo_vmware.vim_util.get_vc_version')
def test_get_vc_version_override(self, get_vc_version):
version = self._driver._get_vc_version()
self.assertEqual(
self._driver.configuration.vmware_host_version,
version)
get_vc_version.assert_not_called()
@mock.patch('cinder.volume.drivers.vmware.vmdk.LOG')
@ddt.data('5.5', '6.0')
def test_validate_vcenter_version(self, version, log):
# vCenter versions 5.5 and above should pass validation.
self._driver._validate_vcenter_version(version)
# Deprecation warning should be logged for vCenter versions which are
# incompatible with next minimum supported version.
if not versionutils.is_compatible(
self._driver.NEXT_MIN_SUPPORTED_VC_VERSION, version,
same_major=False):
log.warning.assert_called_once()
else:
log.warning.assert_not_called()
def test_validate_vcenter_version_with_less_than_min_supported_version(
self):
# Validation should fail for vCenter versions below the minimum
# supported version (5.5).
self.assertRaises(exceptions.VMwareDriverException,
self._driver._validate_vcenter_version,
'5.1')
@mock.patch('oslo_vmware.vim_util.find_extension')
@mock.patch('oslo_vmware.vim_util.register_extension')
@mock.patch.object(VMDK_DRIVER, 'session')
def _test_register_extension(
self, session, register_extension, find_extension,
ext_exists=False):
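# The Cinder extension should be registered only when find_extension
# reports that it is not already present.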
if not ext_exists:
find_extension.return_value = None
self._driver._register_extension()
find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY)
if not ext_exists:
register_extension.assert_called_once_with(
session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE,
label='OpenStack Cinder')
def test_register_extension(self):
self._test_register_extension()
def test_register_extension_with_existing_extension(self):
self._test_register_extension(ext_exists=True)
@mock.patch('oslo_vmware.vim_util.find_extension', return_value=None)
@mock.patch('oslo_vmware.vim_util.register_extension')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_concurrent_register_extension(
self, session, register_extension, find_extension):
register_extension.side_effect = exceptions.VimFaultException(
['InvalidArgument'], 'error')
self._driver._register_extension()
find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY)
register_extension.assert_called_once_with(
session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE,
label='OpenStack Cinder')
@mock.patch('oslo_vmware.vim_util.find_extension', return_value=None)
@mock.patch('oslo_vmware.vim_util.register_extension')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_register_extension_failure(
self, session, register_extension, find_extension):
register_extension.side_effect = exceptions.VimFaultException(
['RuntimeFault'], 'error')
self.assertRaises(exceptions.VimFaultException,
self._driver._register_extension)
find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY)
register_extension.assert_called_once_with(
session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE,
label='OpenStack Cinder')
@mock.patch.object(VMDK_DRIVER, '_validate_params')
@mock.patch('re.compile')
@mock.patch.object(VMDK_DRIVER, '_create_session')
@mock.patch.object(VMDK_DRIVER, '_get_vc_version')
@mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version')
@mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
@mock.patch.object(VMDK_DRIVER, '_register_extension')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
@mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def _test_do_setup(
self, session, vops, ds_sel_cls, vops_cls, register_extension,
get_pbm_wsdl_loc, validate_vc_version, get_vc_version,
create_session, re_compile, validate_params, enable_pbm=True,
ds_regex_pat=None, invalid_regex=False):
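# do_setup should validate the configuration, create a session, validate
# the vCenter version, enable storage policies (PBM) when supported,
# register the Cinder extension, build the volumeops and datastore
# selector helpers, cache the configured clusters, and compile any
# configured datastore regex (an invalid pattern raises InvalidInput).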
mock_session = mock.Mock()
create_session.return_value = mock_session
if enable_pbm:
ver_str = '5.5'
pbm_wsdl = mock.sentinel.pbm_wsdl
get_pbm_wsdl_loc.return_value = pbm_wsdl
else:
ver_str = '5.1'
get_vc_version.return_value = ver_str
cls_1 = mock.sentinel.cls_1
cls_2 = mock.sentinel.cls_2
cluster_refs = {'cls-1': cls_1, 'cls-2': cls_2}
vops.get_cluster_refs.return_value = cluster_refs
self._driver.configuration.vmware_datastore_regex = ds_regex_pat
ds_regex = None
if ds_regex_pat:
if invalid_regex:
re_compile.side_effect = re.error("error")
else:
ds_regex = mock.sentinel.ds_regex
re_compile.return_value = ds_regex
if ds_regex_pat and invalid_regex:
self.assertRaises(cinder_exceptions.InvalidInput,
self._driver.do_setup,
mock.ANY)
validate_params.assert_called_once_with()
else:
self._driver.do_setup(mock.ANY)
validate_params.assert_called_once_with()
create_session.assert_called_once_with()
get_vc_version.assert_called_once_with()
validate_vc_version.assert_called_once_with(ver_str)
if enable_pbm:
get_pbm_wsdl_loc.assert_called_once_with(ver_str)
mock_session.pbm_wsdl_loc_set.assert_called_once_with(pbm_wsdl)
self.assertEqual(enable_pbm, self._driver._storage_policy_enabled)
register_extension.assert_called_once()
vops_cls.assert_called_once_with(
session,
self._driver.configuration.vmware_max_objects_retrieval,
vmdk.EXTENSION_KEY,
vmdk.EXTENSION_TYPE)
self.assertEqual(vops_cls.return_value, self._driver._volumeops)
ds_sel_cls.assert_called_once_with(
vops,
session,
self._driver.configuration.vmware_max_objects_retrieval,
ds_regex=ds_regex)
self.assertEqual(ds_sel_cls.return_value, self._driver._ds_sel)
vops.get_cluster_refs.assert_called_once_with(
self._driver.configuration.vmware_cluster_name)
vops.build_backing_ref_cache.assert_called_once_with()
self.assertEqual(list(cluster_refs.values()),
list(self._driver._clusters))
if ds_regex_pat:
re_compile.assert_called_once_with(ds_regex_pat)
def test_do_setup(self):
self._test_do_setup()
def test_do_setup_with_pbm_disabled(self):
self._test_do_setup(enable_pbm=False)
@mock.patch.object(VMDK_DRIVER, '_validate_params')
@mock.patch.object(VMDK_DRIVER, '_create_session')
@mock.patch.object(VMDK_DRIVER, '_get_vc_version')
@mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version')
@mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
def test_do_setup_with_invalid_pbm_wsdl(
self, get_pbm_wsdl_loc, validate_vc_version, get_vc_version,
create_session, validate_params):
ver_str = '5.5'
get_vc_version.return_value = ver_str
get_pbm_wsdl_loc.return_value = None
self.assertRaises(exceptions.VMwareDriverException,
self._driver.do_setup,
mock.ANY)
validate_params.assert_called_once_with()
create_session.assert_called_once_with()
get_vc_version.assert_called_once_with()
validate_vc_version.assert_called_once_with(ver_str)
get_pbm_wsdl_loc.assert_called_once_with(ver_str)
def test_do_setup_with_ds_regex(self):
self._test_do_setup(ds_regex_pat='foo')
def test_do_setup_with_invalid_ds_regex(self):
self._test_do_setup(ds_regex_pat='(foo', invalid_regex=True)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_dc(self, vops):
dc_1 = mock.sentinel.dc_1
dc_2 = mock.sentinel.dc_2
vops.get_dc.side_effect = [dc_1, dc_2]
# cache miss
rp_1 = mock.Mock(value='rp-1')
rp_2 = mock.Mock(value='rp-2')
self.assertEqual(dc_1, self._driver._get_dc(rp_1))
self.assertEqual(dc_2, self._driver._get_dc(rp_2))
self.assertDictEqual({'rp-1': dc_1, 'rp-2': dc_2},
self._driver._dc_cache)
# cache hit
self.assertEqual(dc_1, self._driver._get_dc(rp_1))
self.assertEqual(dc_2, self._driver._get_dc(rp_2))
vops.get_dc.assert_has_calls([mock.call(rp_1), mock.call(rp_2)])
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, '_select_datastore')
@mock.patch.object(VMDK_DRIVER, '_get_dc')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@ddt.data(None, {vmdk.CREATE_PARAM_DISK_SIZE: 2 * VOL_SIZE})
def test_select_ds_for_volume(
self, create_params, get_volume_group_folder, vops, get_dc,
select_datastore, get_storage_profile):
profile = mock.sentinel.profile
get_storage_profile.return_value = profile
host = mock.sentinel.host
rp = mock.sentinel.rp
summary = mock.sentinel.summary
select_datastore.return_value = (host, rp, summary)
dc = mock.sentinel.dc
get_dc.return_value = dc
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
vol = self._create_volume_dict()
ret = self._driver._select_ds_for_volume(
vol, host=host, create_params=create_params)
self.assertEqual((host, rp, folder, summary), ret)
if create_params:
exp_size = create_params[vmdk.CREATE_PARAM_DISK_SIZE] * units.Gi
else:
exp_size = vol['size'] * units.Gi
exp_req = {hub.DatastoreSelector.SIZE_BYTES: exp_size,
hub.DatastoreSelector.PROFILE_NAME: profile}
select_datastore.assert_called_once_with(exp_req, host)
get_dc.assert_called_once_with(rp)
get_volume_group_folder.assert_called_once_with(dc, vol['project_id'])
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
def _test_get_connection_info(
self, get_storage_profile_id, vops, vmdk_connector=False):
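# Connection info always carries the backing ref, volume id, name and
# storage profile id; when the connector has no instance (vmdk
# connector), it also includes the vmdk path, datastore, datacenter,
# disk size and the vCenter connection config.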
volume = self._create_volume_obj()
backing = mock.Mock(value='ref-1')
profile_id = mock.sentinel.profile_id
get_storage_profile_id.return_value = profile_id
if vmdk_connector:
vmdk_path = mock.sentinel.vmdk_path
vops.get_vmdk_path.return_value = vmdk_path
datastore = mock.Mock(value='ds-1')
vops.get_datastore.return_value = datastore
datacenter = mock.Mock(value='dc-1')
vops.get_dc.return_value = datacenter
connector = {'platform': mock.sentinel.platform,
'os_type': mock.sentinel.os_type}
else:
connector = {'instance': 'vm-1'}
ret = self._driver._get_connection_info(volume, backing, connector)
self.assertEqual('vmdk', ret['driver_volume_type'])
self.assertEqual('ref-1', ret['data']['volume'])
self.assertEqual(volume.id, ret['data']['volume_id'])
self.assertEqual(volume.name, ret['data']['name'])
self.assertEqual(profile_id, ret['data']['profile_id'])
if vmdk_connector:
self.assertEqual(volume.size * units.Gi, ret['data']['vmdk_size'])
self.assertEqual(vmdk_path, ret['data']['vmdk_path'])
self.assertEqual('ds-1', ret['data']['datastore'])
self.assertEqual('dc-1', ret['data']['datacenter'])
config = self._driver.configuration
exp_config = {
'vmware_host_ip': config.vmware_host_ip,
'vmware_host_port': config.vmware_host_port,
'vmware_host_username': config.vmware_host_username,
'vmware_host_password': config.vmware_host_password,
'vmware_api_retry_count': config.vmware_api_retry_count,
'vmware_task_poll_interval': config.vmware_task_poll_interval,
'vmware_ca_file': config.vmware_ca_file,
'vmware_insecure': config.vmware_insecure,
'vmware_tmp_dir': config.vmware_tmp_dir,
'vmware_image_transfer_timeout_secs':
config.vmware_image_transfer_timeout_secs,
}
self.assertEqual(exp_config, ret['data']['config'])
def test_get_connection_info(self):
self._test_get_connection_info()
def test_get_connection_info_vmdk_connector(self):
self._test_get_connection_info(vmdk_connector=True)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch('oslo_vmware.vim_util.get_moref')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, '_relocate_backing')
@mock.patch.object(VMDK_DRIVER, '_get_connection_info')
def _test_initialize_connection(
self, get_connection_info, relocate_backing, create_backing,
get_moref, vops, backing_exists=True, instance_exists=True):
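# If the connector identifies an instance, the backing is relocated (or
# created) on that instance's host; without an instance, a backing is
# created only when one does not already exist.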
backing_val = mock.sentinel.backing_val
backing = mock.Mock(value=backing_val)
if backing_exists:
vops.get_backing.return_value = backing
else:
vops.get_backing.return_value = None
create_backing.return_value = backing
if instance_exists:
instance_val = mock.sentinel.instance_val
connector = {'instance': instance_val}
instance_moref = mock.sentinel.instance_moref
get_moref.return_value = instance_moref
host = mock.sentinel.host
vops.get_host.return_value = host
else:
connector = {}
conn_info = mock.sentinel.conn_info
get_connection_info.return_value = conn_info
volume = self._create_volume_obj()
ret = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info, ret)
if instance_exists:
vops.get_host.assert_called_once_with(instance_moref)
if backing_exists:
relocate_backing.assert_called_once_with(volume, backing, host)
create_backing.assert_not_called()
else:
create_backing.assert_called_once_with(volume, host)
relocate_backing.assert_not_called()
elif not backing_exists:
create_backing.assert_called_once_with(volume)
relocate_backing.assert_not_called()
else:
create_backing.assert_not_called()
relocate_backing.assert_not_called()
get_connection_info.assert_called_once_with(volume, backing, connector)
def test_initialize_connection_with_instance_and_backing(self):
self._test_initialize_connection()
def test_initialize_connection_with_instance_and_no_backing(self):
self._test_initialize_connection(backing_exists=False)
def test_initialize_connection_with_no_instance_and_no_backing(self):
self._test_initialize_connection(
backing_exists=False, instance_exists=False)
def test_initialize_connection_with_no_instance_and_backing(self):
self._test_initialize_connection(instance_exists=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def _test_get_volume_group_folder(self, vops, snapshot=False):
folder = mock.sentinel.folder
vops.create_vm_inventory_folder.return_value = folder
datacenter = mock.sentinel.dc
project_id = '63c19a12292549818c09946a5e59ddaf'
self.assertEqual(folder,
self._driver._get_volume_group_folder(
datacenter, project_id, snapshot=snapshot))
project_folder_name = 'Project (%s)' % project_id
exp_folder_names = ['OpenStack',
project_folder_name,
self.VOLUME_FOLDER]
if snapshot:
exp_folder_names.append('Snapshots')
vops.create_vm_inventory_folder.assert_called_once_with(
datacenter, exp_folder_names)
def test_get_volume_group_folder(self):
self._test_get_volume_group_folder()
def test_get_volume_group_folder_for_snapshot(self):
self._test_get_volume_group_folder(snapshot=True)
@mock.patch('cinder.volume.drivers.vmware.vmdk.'
'_get_volume_type_extra_spec')
@ddt.data('full', 'linked')
def test_get_clone_type(self, clone_type, get_volume_type_extra_spec):
get_volume_type_extra_spec.return_value = clone_type
volume = self._create_volume_dict()
self.assertEqual(clone_type, self._driver._get_clone_type(volume))
get_volume_type_extra_spec.assert_called_once_with(
volume['volume_type_id'], 'clone_type',
default_value=volumeops.FULL_CLONE_TYPE)
@mock.patch('cinder.volume.drivers.vmware.vmdk.'
'_get_volume_type_extra_spec')
def test_get_clone_type_invalid(
self, get_volume_type_extra_spec):
get_volume_type_extra_spec.return_value = 'foo'
volume = self._create_volume_dict()
self.assertRaises(
cinder_exceptions.Invalid, self._driver._get_clone_type, volume)
get_volume_type_extra_spec.assert_called_once_with(
volume['volume_type_id'], 'clone_type',
default_value=volumeops.FULL_CLONE_TYPE)
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
def _test_clone_backing(
self, extend_backing, select_ds_for_volume, vops, get_disk_type,
clone_type=volumeops.FULL_CLONE_TYPE, extend_needed=False,
vc60=False):
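# A linked clone is used only when the requested size matches the source
# size; a size increase (or an explicit full-clone request) results in a
# full clone, and the new backing is extended when the requested size is
# larger. On vCenter 6.0 the disk UUID is not updated for linked clones.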
host = mock.sentinel.host
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
select_ds_for_volume.return_value = (host, rp, folder, summary)
clone = mock.sentinel.clone
vops.clone_backing.return_value = clone
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
if vc60:
self._driver._vc_version = '6.0'
else:
self._driver._vc_version = '5.5'
src_vsize = 1
if extend_needed:
size = 2
else:
size = 1
volume = self._create_volume_obj(size=size)
backing = mock.sentinel.backing
snapshot = mock.sentinel.snapshot
self._driver._clone_backing(
volume, backing, snapshot, clone_type, src_vsize)
extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'],
volumeops.BACKING_UUID_KEY: volume['id']}
if volume.size > src_vsize or clone_type == volumeops.FULL_CLONE_TYPE:
vops.clone_backing.assert_called_once_with(
volume.name,
backing,
snapshot,
volumeops.FULL_CLONE_TYPE,
datastore,
host=host,
resource_pool=rp,
extra_config=extra_config,
folder=folder)
vops.update_backing_disk_uuid.assert_called_once_with(clone,
volume.id)
else:
vops.clone_backing.assert_called_once_with(
volume.name,
backing,
snapshot,
volumeops.LINKED_CLONE_TYPE,
None,
host=None,
resource_pool=None,
extra_config=extra_config,
folder=None)
if not vc60:
vops.update_backing_disk_uuid.assert_called_once_with(
clone, volume.id)
else:
vops.update_backing_disk_uuid.assert_not_called()
if volume.size > src_vsize:
extend_backing.assert_called_once_with(clone, volume.size,
disk_type)
else:
extend_backing.assert_not_called()
@ddt.data(volumeops.FULL_CLONE_TYPE, volumeops.LINKED_CLONE_TYPE)
def test_clone_backing(self, clone_type):
self._test_clone_backing(clone_type=clone_type)
@ddt.data(volumeops.FULL_CLONE_TYPE, volumeops.LINKED_CLONE_TYPE)
def test_clone_backing_with_extend(self, clone_type):
self._test_clone_backing(clone_type=clone_type, extend_needed=True)
def test_clone_backing_linked_vc_60(self):
self._test_clone_backing(
clone_type=volumeops.LINKED_CLONE_TYPE, vc60=True)
@mock.patch.object(VMDK_DRIVER, '_get_template_by_inv_path')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_create_volume_from_temp_backing')
def test_create_volume_from_template(
self, create_volume_from_temp_backing, vops, get_disk_type,
select_ds_for_volume, generate_uuid, get_template_by_inv_path):
template = mock.sentinel.template
get_template_by_inv_path.return_value = template
tmp_name = 'de4c648c-8403-4dcc-b14a-d2541b7cba2b'
generate_uuid.return_value = tmp_name
host = mock.sentinel.host
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
select_ds_for_volume.return_value = (host, rp, folder, summary)
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
tmp_backing = mock.sentinel.tmp_backing
vops.clone_backing.return_value = tmp_backing
volume = self._create_volume_obj()
inv_path = mock.sentinel.inv_path
self._driver._create_volume_from_template(volume, inv_path)
get_template_by_inv_path.assert_called_once_with(inv_path)
select_ds_for_volume.assert_called_once_with(volume)
get_disk_type.assert_called_once_with(volume)
vops.clone_backing.assert_called_once_with(tmp_name,
template,
None,
volumeops.FULL_CLONE_TYPE,
datastore,
disk_type=disk_type,
host=host,
resource_pool=rp,
folder=folder)
create_volume_from_temp_backing.assert_called_once_with(volume,
tmp_backing)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_clone_backing')
def test_create_volume_from_snapshot_without_backing(self, clone_backing,
vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID)
snapshot = self._create_snapshot_dict(src_vref)
self._driver.create_volume_from_snapshot(volume, snapshot)
vops.get_backing.assert_called_once_with(snapshot['volume_name'],
snapshot['volume']['id'])
clone_backing.assert_not_called()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_clone_backing')
def test_create_volume_from_snapshot_without_backing_snapshot(
self, clone_backing, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
vops.get_snapshot.return_value = None
volume = self._create_volume_dict()
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID)
snapshot = self._create_snapshot_dict(src_vref)
self._driver.create_volume_from_snapshot(volume, snapshot)
vops.get_backing.assert_called_once_with(snapshot['volume_name'],
snapshot['volume']['id'])
vops.get_snapshot.assert_called_once_with(backing, snapshot['name'])
clone_backing.assert_not_called()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_clone_type')
@mock.patch.object(VMDK_DRIVER, '_create_volume_from_template')
@mock.patch.object(VMDK_DRIVER, '_clone_backing')
def _test_create_volume_from_snapshot(
self, clone_backing, create_volume_from_template, get_clone_type,
vops, template=False):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
snapshot_moref = mock.sentinel.snap_moref
vops.get_snapshot.return_value = snapshot_moref
get_clone_type.return_value = volumeops.FULL_CLONE_TYPE
volume = self._create_volume_dict()
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID)
if template:
provider_location = mock.sentinel.inv_path
else:
provider_location = None
snapshot = self._create_snapshot_dict(
src_vref, provider_location=provider_location)
self._driver.create_volume_from_snapshot(volume, snapshot)
vops.get_backing.assert_called_once_with(snapshot['volume_name'],
snapshot['volume']['id'])
if template:
create_volume_from_template.assert_called_once_with(
volume, mock.sentinel.inv_path)
else:
vops.get_snapshot.assert_called_once_with(backing,
snapshot['name'])
get_clone_type.assert_called_once_with(volume)
clone_backing.assert_called_once_with(
volume, backing, snapshot_moref, volumeops.FULL_CLONE_TYPE,
snapshot['volume_size'])
def test_create_volume_from_snapshot(self):
self._test_create_volume_from_snapshot()
def test_create_volume_from_snapshot_template(self):
self._test_create_volume_from_snapshot(template=True)
@mock.patch.object(VMDK_DRIVER, 'session')
def test_get_volume_device_uuid(self, session):
dev_uuid = mock.sentinel.dev_uuid
opt_val = mock.Mock(value=dev_uuid)
session.invoke_api.return_value = opt_val
instance = mock.sentinel.instance
ret = self._driver._get_volume_device_uuid(instance, self.VOL_ID)
self.assertEqual(dev_uuid, ret)
exp_prop = 'config.extraConfig["volume-%s"]' % self.VOL_ID
session.invoke_api.assert_called_once_with(
vim_util, 'get_object_property', session.vim, instance, exp_prop)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_volume_device_uuid')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_create_temp_backing_from_attached_vmdk(
self, generate_uuid, get_volume_device_uuid, vops):
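# The source volume is attached to an instance, so the temporary backing
# is produced by cloning only the attached volume's disk from that
# instance.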
instance = mock.sentinel.instance
vops.get_backing_by_uuid.return_value = instance
vol_dev_uuid = mock.sentinel.vol_dev_uuid
get_volume_device_uuid.return_value = vol_dev_uuid
tmp_name = mock.sentinel.tmp_name
generate_uuid.return_value = tmp_name
tmp_backing = mock.sentinel.tmp_backing
vops.clone_backing.return_value = tmp_backing
instance_uuid = fake_constants.INSTANCE_ID
attachment = fake_volume.fake_db_volume_attachment(
instance_uuid=instance_uuid)
src_vref = self._create_volume_dict(vol_id=fake_constants.VOLUME_ID,
attachment=[attachment])
host = mock.sentinel.host
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
ret = self._driver._create_temp_backing_from_attached_vmdk(
src_vref, host, rp, folder, datastore)
self.assertEqual(tmp_backing, ret)
vops.get_backing_by_uuid.assert_called_once_with(instance_uuid)
get_volume_device_uuid.assert_called_once_with(instance,
src_vref['id'])
vops.clone_backing.assert_called_once_with(
tmp_name, instance, None, volumeops.FULL_CLONE_TYPE, datastore,
host=host, resource_pool=rp, folder=folder,
disks_to_clone=[vol_dev_uuid])
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
def _test_extend_backing_if_needed(
self, extend_backing, vops, get_disk_type, extend=True):
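# The backing disk is 1 GB; it should be extended only when the volume
# size is larger than the current disk size.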
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
if extend:
vol_size = 2
else:
vol_size = 1
vops.get_disk_size.return_value = units.Gi
volume = self._create_volume_obj(size=vol_size)
backing = mock.sentinel.backing
self._driver._extend_if_needed(volume, backing)
vops.get_disk_size.assert_called_once_with(backing)
if extend:
extend_backing.assert_called_once_with(backing, vol_size,
disk_type)
else:
extend_backing.assert_not_called()
def test_extend_backing_if_needed(self):
self._test_extend_backing_if_needed()
def test_extend_backing_if_needed_no_extend(self):
self._test_extend_backing_if_needed(extend=False)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_manage_existing_int')
@mock.patch.object(VMDK_DRIVER, '_extend_if_needed')
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
def test_create_volume_from_temp_backing(
self, delete_temp_backing, extend_if_needed, manage_existing_int,
vops):
disk_device = mock.sentinel.disk_device
vops._get_disk_device.return_value = disk_device
backing = mock.sentinel.backing
manage_existing_int.return_value = backing
volume = self._create_volume_dict()
tmp_backing = mock.sentinel.tmp_backing
self._driver._create_volume_from_temp_backing(volume, tmp_backing)
vops._get_disk_device.assert_called_once_with(tmp_backing)
manage_existing_int.assert_called_once_with(
volume, tmp_backing, disk_device)
extend_if_needed.assert_called_once_with(volume, backing)
delete_temp_backing.assert_called_once_with(tmp_backing)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_create_temp_backing_from_attached_vmdk')
@mock.patch.object(VMDK_DRIVER, '_create_volume_from_temp_backing')
def test_clone_attached_volume(
self, create_volume_from_temp_backing,
create_temp_backing_from_attached_vmdk, select_ds_for_volume):
host = mock.sentinel.host
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
select_ds_for_volume.return_value = (host, rp, folder, summary)
tmp_backing = mock.sentinel.tmp_backing
create_temp_backing_from_attached_vmdk.return_value = tmp_backing
src_vref = mock.sentinel.src_vref
volume = mock.sentinel.volume
self._driver._clone_attached_volume(src_vref, volume)
select_ds_for_volume.assert_called_once_with(volume)
create_temp_backing_from_attached_vmdk.assert_called_once_with(
src_vref, host, rp, folder, datastore)
create_volume_from_temp_backing.assert_called_once_with(
volume, tmp_backing)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_clone_backing')
def test_create_cloned_volume_without_backing(self, clone_backing, vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID)
self._driver.create_cloned_volume(volume, src_vref)
vops.get_backing.assert_called_once_with(src_vref['name'],
src_vref['id'])
clone_backing.assert_not_called()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_clone_type')
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, '_clone_backing')
def test_create_cloned_volume(
self, clone_backing, in_use, get_clone_type, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_clone_type.return_value = volumeops.FULL_CLONE_TYPE
volume = self._create_volume_dict()
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID)
self._driver.create_cloned_volume(volume, src_vref)
vops.get_backing.assert_called_once_with(src_vref['name'],
src_vref['id'])
get_clone_type.assert_called_once_with(volume)
clone_backing.assert_called_once_with(
volume, backing, None, volumeops.FULL_CLONE_TYPE, src_vref['size'])
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_clone_type')
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
@mock.patch.object(VMDK_DRIVER, '_clone_backing')
def test_create_cloned_volume_linked(
self, clone_backing, in_use, get_clone_type, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_clone_type.return_value = volumeops.LINKED_CLONE_TYPE
temp_snapshot = mock.sentinel.temp_snapshot
vops.create_snapshot.return_value = temp_snapshot
volume = self._create_volume_dict()
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID)
self._driver.create_cloned_volume(volume, src_vref)
vops.get_backing.assert_called_once_with(src_vref['name'],
src_vref['id'])
get_clone_type.assert_called_once_with(volume)
temp_snap_name = 'temp-snapshot-%s' % volume['id']
vops.create_snapshot.assert_called_once_with(
backing, temp_snap_name, None)
clone_backing.assert_called_once_with(
volume, backing, temp_snapshot, volumeops.LINKED_CLONE_TYPE,
src_vref['size'])
vops.delete_snapshot.assert_called_once_with(backing, temp_snap_name)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_clone_type')
@mock.patch.object(VMDK_DRIVER, '_clone_backing')
def test_create_cloned_volume_linked_when_attached(
self, clone_backing, get_clone_type, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_clone_type.return_value = volumeops.LINKED_CLONE_TYPE
volume = self._create_volume_dict()
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID,
status='in-use')
self.assertRaises(cinder_exceptions.InvalidVolume,
self._driver.create_cloned_volume,
volume,
src_vref)
vops.get_backing.assert_called_once_with(src_vref['name'],
src_vref['id'])
get_clone_type.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_clone_type')
@mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
@mock.patch.object(VMDK_DRIVER, '_clone_attached_volume')
def test_create_cloned_volume_when_attached(
self, clone_attached_volume, in_use, get_clone_type, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_clone_type.return_value = volumeops.FULL_CLONE_TYPE
volume = self._create_volume_dict(status='in-use')
src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID)
self._driver.create_cloned_volume(volume, src_vref)
vops.get_backing.assert_called_once_with(src_vref['name'],
src_vref['id'])
get_clone_type.assert_called_once_with(volume)
clone_attached_volume.assert_called_once_with(src_vref, volume)
@mock.patch('cinder.volume.drivers.vmware.vmdk.'
'_get_volume_type_extra_spec')
def test_get_extra_spec_storage_profile(self, get_volume_type_extra_spec):
vol_type_id = mock.sentinel.vol_type_id
self._driver._get_extra_spec_storage_profile(vol_type_id)
get_volume_type_extra_spec.assert_called_once_with(vol_type_id,
'storage_profile')
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
def test_get_storage_profile(self, get_extra_spec_storage_profile):
volume = self._create_volume_dict()
self._driver._get_storage_profile(volume)
get_extra_spec_storage_profile.assert_called_once_with(
volume['volume_type_id'])
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
def test_get_storage_profile_id(
self, get_profile_id_by_name, session, get_storage_profile):
get_storage_profile.return_value = 'gold'
profile_id = mock.sentinel.profile_id
get_profile_id_by_name.return_value = mock.Mock(uniqueId=profile_id)
self._driver._storage_policy_enabled = True
volume = self._create_volume_dict()
self.assertEqual(profile_id,
self._driver._get_storage_profile_id(volume))
get_storage_profile.assert_called_once_with(volume)
get_profile_id_by_name.assert_called_once_with(session, 'gold')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
def test_get_storage_profile_id_with_missing_extra_spec(
self, get_profile_id_by_name, session, get_storage_profile):
get_storage_profile.return_value = None
self._driver._storage_policy_enabled = True
volume = self._create_volume_dict()
self.assertIsNone(self._driver._get_storage_profile_id(volume))
get_storage_profile.assert_called_once_with(volume)
self.assertFalse(get_profile_id_by_name.called)
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
def test_get_storage_profile_id_with_pbm_disabled(
self, get_profile_id_by_name, session, get_storage_profile):
get_storage_profile.return_value = 'gold'
volume = self._create_volume_dict()
self.assertIsNone(self._driver._get_storage_profile_id(volume))
get_storage_profile.assert_called_once_with(volume)
self.assertFalse(get_profile_id_by_name.called)
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
def test_get_storage_profile_id_with_missing_profile(
self, get_profile_id_by_name, session, get_storage_profile):
get_storage_profile.return_value = 'gold'
get_profile_id_by_name.return_value = None
self._driver._storage_policy_enabled = True
volume = self._create_volume_dict()
self.assertIsNone(self._driver._get_storage_profile_id(volume))
get_storage_profile.assert_called_once_with(volume)
get_profile_id_by_name.assert_called_once_with(session, 'gold')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch('cinder.image.image_utils.TemporaryImages.for_image_service')
@mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
@mock.patch('oslo_vmware.image_transfer.download_file')
@mock.patch('oslo_vmware.image_transfer.download_flat_image')
def _test_copy_image(self, download_flat_image, download_file, mock_open,
temp_images_img_service, session, vops,
expected_cacerts=False, use_temp_image=False):
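# When a temporary image file is available locally, it is streamed to the
# datastore with download_file; otherwise the image is copied directly
# from the image service with download_flat_image.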
dc_name = mock.sentinel.dc_name
vops.get_entity_name.return_value = dc_name
mock_get = mock.Mock(return_value=None)
tmp_images = mock.Mock(get=mock_get)
temp_images_img_service.return_value = tmp_images
if use_temp_image:
mock_get.return_value = '/tmp/foo'
mock_open_ret = mock.Mock()
mock_open_ret.__enter__ = mock.Mock(
return_value=mock.sentinel.read_handle)
mock_open_ret.__exit__ = mock.Mock()
mock_open.return_value = mock_open_ret
context = mock.sentinel.context
dc_ref = mock.sentinel.dc_ref
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size_in_bytes = 102400
ds_name = mock.sentinel.ds_name
upload_file_path = mock.sentinel.upload_file_path
self._driver._copy_image(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, upload_file_path)
vops.get_entity_name.assert_called_once_with(dc_ref)
cookies = session.vim.client.options.transport.cookiejar
if use_temp_image:
mock_open.assert_called_once_with('/tmp/foo', 'rb')
download_file.assert_called_once_with(
mock.sentinel.read_handle,
self._config.vmware_host_ip,
self._config.vmware_host_port,
dc_name,
ds_name,
cookies,
upload_file_path,
image_size_in_bytes,
expected_cacerts,
self._config.vmware_image_transfer_timeout_secs)
else:
download_flat_image.assert_called_once_with(
context,
self._config.vmware_image_transfer_timeout_secs,
image_service,
image_id,
image_size=image_size_in_bytes,
host=self._config.vmware_host_ip,
port=self._config.vmware_host_port,
data_center_name=dc_name,
datastore_name=ds_name,
cookies=cookies,
file_path=upload_file_path,
cacerts=expected_cacerts)
def test_copy_image(self):
# Default value of vmware_ca_file is not None; it should be passed
# to download_flat_image as cacerts.
self._test_copy_image(expected_cacerts=self._config.vmware_ca_file)
def test_copy_image_insecure(self):
# Set config options to allow insecure connections.
self._config.vmware_ca_file = None
self._config.vmware_insecure = True
# Since vmware_ca_file is unset and vmware_insecure is True,
# download_flat_image should be called with cacerts=False.
self._test_copy_image()
def test_copy_temp_image(self):
self._test_copy_image(expected_cacerts=self._config.vmware_ca_file,
use_temp_image=True)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_adapter_type')
def _test_create_backing(
self, get_adapter_type, get_disk_type, vops,
get_storage_profile_id, select_ds_for_volume, create_params=None):
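# A datastore, resource pool and folder are selected for the volume;
# either a disk-less backing or a backing with a disk of the requested
# size is then created, honouring any backing-name or adapter-type
# overrides in create_params.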
create_params = create_params or {}
host = mock.sentinel.host
resource_pool = mock.sentinel.resource_pool
folder = mock.sentinel.folder
summary = mock.sentinel.summary
select_ds_for_volume.return_value = (host, resource_pool, folder,
summary)
profile_id = mock.sentinel.profile_id
get_storage_profile_id.return_value = profile_id
backing = mock.sentinel.backing
vops.create_backing_disk_less.return_value = backing
vops.create_backing.return_value = backing
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
adapter_type = mock.sentinel.adapter_type
get_adapter_type.return_value = adapter_type
volume = self._create_volume_dict()
ret = self._driver._create_backing(volume, host, create_params)
self.assertEqual(backing, ret)
select_ds_for_volume.assert_called_once_with(volume, host)
get_storage_profile_id.assert_called_once_with(volume)
exp_extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'],
volumeops.BACKING_UUID_KEY: volume['id']}
if create_params.get(vmdk.CREATE_PARAM_DISK_LESS):
vops.create_backing_disk_less.assert_called_once_with(
volume['name'],
folder,
resource_pool,
host,
summary.name,
profileId=profile_id,
extra_config=exp_extra_config)
vops.update_backing_disk_uuid.assert_not_called()
else:
get_disk_type.assert_called_once_with(volume)
get_adapter_type.assert_called_once_with(volume)
exp_backing_name = (
create_params.get(vmdk.CREATE_PARAM_BACKING_NAME) or
volume['name'])
exp_adapter_type = (
create_params.get(vmdk.CREATE_PARAM_ADAPTER_TYPE) or
adapter_type)
vops.create_backing.assert_called_once_with(
exp_backing_name,
volume['size'] * units.Mi,
disk_type,
folder,
resource_pool,
host,
summary.name,
profileId=profile_id,
adapter_type=exp_adapter_type,
extra_config=exp_extra_config)
vops.update_backing_disk_uuid.assert_called_once_with(backing,
volume['id'])
def test_create_backing_disk_less(self):
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
self._test_create_backing(create_params=create_params)
def test_create_backing_with_adapter_type_override(self):
create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'}
self._test_create_backing(create_params=create_params)
def test_create_backing_with_backing_name_override(self):
create_params = {vmdk.CREATE_PARAM_BACKING_NAME: 'foo'}
self._test_create_backing(create_params=create_params)
def test_create_backing(self):
self._test_create_backing()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_hosts(self, vops):
host_1 = mock.sentinel.host_1
host_2 = mock.sentinel.host_2
host_3 = mock.sentinel.host_3
vops.get_cluster_hosts.side_effect = [[host_1, host_2], [host_3]]
cls_1 = mock.sentinel.cls_1
cls_2 = mock.sentinel.cls_2
self.assertEqual([host_1, host_2, host_3],
self._driver._get_hosts([cls_1, cls_2]))
exp_calls = [mock.call(cls_1), mock.call(cls_2)]
self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list)
@mock.patch.object(VMDK_DRIVER, '_get_hosts')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_datastore(self, ds_sel, get_hosts):
cls_1 = mock.sentinel.cls_1
cls_2 = mock.sentinel.cls_2
self._driver._clusters = [cls_1, cls_2]
host_1 = mock.sentinel.host_1
host_2 = mock.sentinel.host_2
host_3 = mock.sentinel.host_3
get_hosts.return_value = [host_1, host_2, host_3]
best_candidate = mock.sentinel.best_candidate
ds_sel.select_datastore.return_value = best_candidate
req = mock.sentinel.req
self.assertEqual(best_candidate, self._driver._select_datastore(req))
get_hosts.assert_called_once_with(self._driver._clusters)
ds_sel.select_datastore.assert_called_once_with(
req, hosts=[host_1, host_2, host_3])
@mock.patch.object(VMDK_DRIVER, '_get_hosts')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_datastore_with_no_best_candidate(self, ds_sel, get_hosts):
cls_1 = mock.sentinel.cls_1
cls_2 = mock.sentinel.cls_2
self._driver._clusters = [cls_1, cls_2]
host_1 = mock.sentinel.host_1
host_2 = mock.sentinel.host_2
host_3 = mock.sentinel.host_3
get_hosts.return_value = [host_1, host_2, host_3]
ds_sel.select_datastore.return_value = ()
req = mock.sentinel.req
self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
self._driver._select_datastore,
req)
get_hosts.assert_called_once_with(self._driver._clusters)
ds_sel.select_datastore.assert_called_once_with(
req, hosts=[host_1, host_2, host_3])
@mock.patch.object(VMDK_DRIVER, '_get_hosts')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_datastore_with_single_host(self, ds_sel, get_hosts):
best_candidate = mock.sentinel.best_candidate
ds_sel.select_datastore.return_value = best_candidate
req = mock.sentinel.req
host_1 = mock.sentinel.host_1
self.assertEqual(best_candidate,
self._driver._select_datastore(req, host_1))
ds_sel.select_datastore.assert_called_once_with(req, hosts=[host_1])
self.assertFalse(get_hosts.called)
@mock.patch.object(VMDK_DRIVER, '_get_hosts')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_datastore_with_empty_clusters(self, ds_sel, get_hosts):
self._driver._clusters = None
best_candidate = mock.sentinel.best_candidate
ds_sel.select_datastore.return_value = best_candidate
req = mock.sentinel.req
self.assertEqual(best_candidate, self._driver._select_datastore(req))
ds_sel.select_datastore.assert_called_once_with(req, hosts=None)
self.assertFalse(get_hosts.called)
@mock.patch.object(VMDK_DRIVER, '_get_hosts')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_datastore_with_no_valid_host(self, ds_sel, get_hosts):
cls_1 = mock.sentinel.cls_1
cls_2 = mock.sentinel.cls_2
self._driver._clusters = [cls_1, cls_2]
get_hosts.return_value = []
req = mock.sentinel.req
self.assertRaises(vmdk_exceptions.NoValidHostException,
self._driver._select_datastore, req)
get_hosts.assert_called_once_with(self._driver._clusters)
self.assertFalse(ds_sel.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_relocate_backing_nop(self, ds_sel, get_profile, vops):
self._driver._storage_policy_enabled = True
volume = self._create_volume_dict()
datastore = mock.sentinel.datastore
vops.get_datastore.return_value = datastore
profile = mock.sentinel.profile
get_profile.return_value = profile
vops.is_datastore_accessible.return_value = True
ds_sel.is_datastore_compliant.return_value = True
backing = mock.sentinel.backing
host = mock.sentinel.host
self._driver._relocate_backing(volume, backing, host)
get_profile.assert_called_once_with(volume)
vops.is_datastore_accessible.assert_called_once_with(datastore, host)
ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
profile)
self.assertFalse(vops.relocate_backing.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_relocate_backing_with_no_datastore(
self, ds_sel, get_profile, vops):
self._driver._storage_policy_enabled = True
volume = self._create_volume_dict()
profile = mock.sentinel.profile
get_profile.return_value = profile
vops.is_datastore_accessible.return_value = True
ds_sel.is_datastore_compliant.return_value = False
ds_sel.select_datastore.return_value = []
backing = mock.sentinel.backing
host = mock.sentinel.host
self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
self._driver._relocate_backing,
volume,
backing,
host)
get_profile.assert_called_once_with(volume)
ds_sel.select_datastore.assert_called_once_with(
{hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host])
self.assertFalse(vops.relocate_backing.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_dc')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_relocate_backing(
self, ds_sel, get_volume_group_folder, get_dc, vops):
volume = self._create_volume_dict()
vops.is_datastore_accessible.return_value = False
ds_sel.is_datastore_compliant.return_value = True
backing = mock.sentinel.backing
host = mock.sentinel.host
rp = mock.sentinel.rp
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
ds_sel.select_datastore.return_value = (host, rp, summary)
dc = mock.sentinel.dc
get_dc.return_value = dc
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
self._driver._relocate_backing(volume, backing, host)
get_dc.assert_called_once_with(rp)
get_volume_group_folder.assert_called_once_with(
dc, volume['project_id'])
vops.relocate_backing.assert_called_once_with(backing,
datastore,
rp,
host)
vops.move_backing_to_folder.assert_called_once_with(backing,
folder)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_dc')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_relocate_backing_with_pbm_disabled(
self, ds_sel, get_volume_group_folder, get_dc, vops):
self._driver._storage_policy_enabled = False
volume = self._create_volume_dict()
vops.is_datastore_accessible.return_value = False
backing = mock.sentinel.backing
host = mock.sentinel.host
rp = mock.sentinel.rp
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
ds_sel.select_datastore.return_value = (host, rp, summary)
dc = mock.sentinel.dc
get_dc.return_value = dc
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
self._driver._relocate_backing(volume, backing, host)
self.assertFalse(vops.get_profile.called)
get_dc.assert_called_once_with(rp)
get_volume_group_folder.assert_called_once_with(
dc, volume['project_id'])
vops.relocate_backing.assert_called_once_with(backing,
datastore,
rp,
host)
vops.move_backing_to_folder.assert_called_once_with(backing,
folder)
ds_sel.select_datastore.assert_called_once_with(
{hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
hub.DatastoreSelector.PROFILE_NAME: None}, hosts=[host])
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_disk_device(self, vops):
vm = mock.sentinel.vm
vops.get_entity_by_inventory_path.return_value = vm
dev = mock.sentinel.dev
vops.get_disk_device.return_value = dev
vm_inv_path = mock.sentinel.vm_inv_path
vmdk_path = mock.sentinel.vmdk_path
ret = self._driver._get_disk_device(vmdk_path, vm_inv_path)
self.assertEqual((vm, dev), ret)
vops.get_entity_by_inventory_path.assert_called_once_with(vm_inv_path)
vops.get_disk_device.assert_called_once_with(vm, vmdk_path)
def test_get_existing_with_empty_source_name(self):
self.assertRaises(cinder_exceptions.InvalidInput,
self._driver._get_existing,
{})
def test_get_existing_with_invalid_source_name(self):
self.assertRaises(cinder_exceptions.InvalidInput,
self._driver._get_existing,
{'source-name': 'foo'})
@mock.patch.object(VMDK_DRIVER, '_get_disk_device', return_value=None)
def test_get_existing_with_invalid_existing_ref(self, get_disk_device):
self.assertRaises(cinder_exceptions.ManageExistingInvalidReference,
self._driver._get_existing,
{'source-name': '[ds1] foo/foo.vmdk@/dc-1/vm/foo'})
get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk',
'/dc-1/vm/foo')
@mock.patch.object(VMDK_DRIVER, '_get_disk_device')
def test_get_existing(self, get_disk_device):
vm = mock.sentinel.vm
disk_device = mock.sentinel.disk_device
get_disk_device.return_value = (vm, disk_device)
self.assertEqual(
(vm, disk_device),
self._driver._get_existing({'source-name':
'[ds1] foo/foo.vmdk@/dc-1/vm/foo'}))
get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk',
'/dc-1/vm/foo')
@mock.patch.object(VMDK_DRIVER, '_get_existing')
@ddt.data((16384, 1), (1048576, 1), (1572864, 2))
def test_manage_existing_get_size(self, test_data, get_existing):
(capacity_kb, exp_size) = test_data
disk_device = mock.Mock(capacityInKB=capacity_kb)
get_existing.return_value = (mock.sentinel.vm, disk_device)
volume = mock.sentinel.volume
existing_ref = mock.sentinel.existing_ref
self.assertEqual(exp_size,
self._driver.manage_existing_get_size(volume,
existing_ref))
get_existing.assert_called_once_with(existing_ref)
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_adapter_type')
def test_manage_existing_int(
self, get_adapter_type, get_disk_type, get_storage_profile_id,
get_ds_name_folder_path, vops, create_backing):
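# Managing an existing virtual disk: create a disk-less backing, detach
# the disk from its current VM, move the vmdk file into the backing's
# folder, and re-attach it with the volume's disk type, adapter type and
# storage profile.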
backing = mock.sentinel.backing
create_backing.return_value = backing
src_dc = mock.sentinel.src_dc
dest_dc = mock.sentinel.dest_dc
vops.get_dc.side_effect = [src_dc, dest_dc]
volume = self._create_volume_dict()
ds_name = "ds1"
folder_path = "%s/" % volume['name']
get_ds_name_folder_path.return_value = (ds_name, folder_path)
profile_id = mock.sentinel.profile_id
get_storage_profile_id.return_value = profile_id
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
adapter_type = mock.sentinel.adapter_type
get_adapter_type.return_value = adapter_type
vm = mock.sentinel.vm
src_path = mock.sentinel.src_path
disk_backing = mock.Mock(fileName=src_path)
disk_device = mock.Mock(backing=disk_backing, capacityInKB=1048576)
ret = self._driver._manage_existing_int(volume, vm, disk_device)
self.assertEqual(backing, ret)
create_backing.assert_called_once_with(
volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True})
vops.detach_disk_from_backing.assert_called_once_with(vm, disk_device)
dest_path = "[%s] %s%s.vmdk" % (ds_name, folder_path, volume['name'])
vops.move_vmdk_file.assert_called_once_with(
src_dc, src_path, dest_path, dest_dc_ref=dest_dc)
get_storage_profile_id.assert_called_once_with(volume)
get_adapter_type.assert_called_once_with(volume)
vops.attach_disk_to_backing.assert_called_once_with(
backing, disk_device.capacityInKB, disk_type,
adapter_type, profile_id, dest_path)
vops.update_backing_disk_uuid.assert_called_once_with(backing,
volume['id'])
@mock.patch.object(VMDK_DRIVER, '_get_existing')
@mock.patch.object(VMDK_DRIVER, '_manage_existing_int')
def test_manage_existing(self, manage_existing_int, get_existing):
vm = mock.sentinel.vm
disk_device = mock.sentinel.disk_device
get_existing.return_value = (vm, disk_device)
volume = mock.sentinel.volume
existing_ref = mock.sentinel.existing_ref
self._driver.manage_existing(volume, existing_ref)
get_existing.assert_called_once_with(existing_ref)
manage_existing_int.assert_called_once_with(volume, vm, disk_device)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_unmanage(self, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_dict()
self._driver.unmanage(volume)
vops.get_backing.assert_called_once_with(volume['name'], volume['id'])
vops.update_backing_extra_config.assert_called_once_with(
backing, {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: '',
volumeops.BACKING_UUID_KEY: ''})
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_create_session(self, apiSession):
session = mock.sentinel.session
apiSession.return_value = session
ret = self._driver._create_session()
self.assertEqual(session, ret)
config = self._driver.configuration
apiSession.assert_called_once_with(
config.vmware_host_ip,
config.vmware_host_username,
config.vmware_host_password,
config.vmware_api_retry_count,
config.vmware_task_poll_interval,
wsdl_loc=config.safe_get('vmware_wsdl_location'),
port=config.vmware_host_port,
cacert=config.vmware_ca_file,
insecure=config.vmware_insecure,
pool_size=config.vmware_connection_pool_size,
op_id_prefix='c-vol')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
def test_extend_volume_with_no_backing(self, extend_backing, vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
self._driver.extend_volume(volume, 2)
self.assertFalse(extend_backing.called)
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
def test_extend_volume(self, extend_backing, vops, get_disk_type):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
volume = self._create_volume_dict()
new_size = 2
self._driver.extend_volume(volume, new_size)
extend_backing.assert_called_once_with(backing, new_size, disk_type)
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_extend_volume_with_no_disk_space(self, select_ds_for_volume,
extend_backing, vops,
get_disk_type):
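# The first extend attempt fails for lack of space on the current
# datastore; the backing should then be relocated to a datastore that can
# accommodate the new size and the extend retried.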
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
extend_backing.side_effect = [exceptions.NoDiskSpaceException, None]
host = mock.sentinel.host
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
select_ds_for_volume.return_value = (host, rp, folder, summary)
volume = self._create_volume_dict()
new_size = 2
self._driver.extend_volume(volume, new_size)
create_params = {vmdk.CREATE_PARAM_DISK_SIZE: new_size}
select_ds_for_volume.assert_called_once_with(
volume, create_params=create_params)
vops.relocate_backing.assert_called_once_with(backing, datastore, rp,
host)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
extend_backing_calls = [mock.call(backing, new_size, disk_type),
mock.call(backing, new_size, disk_type)]
self.assertEqual(extend_backing_calls, extend_backing.call_args_list)
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
def test_extend_volume_with_extend_backing_error(
self, extend_backing, vops, get_disk_type):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
extend_backing.side_effect = exceptions.VimException("Error")
volume = self._create_volume_dict()
new_size = 2
self.assertRaises(exceptions.VimException, self._driver.extend_volume,
volume, new_size)
extend_backing.assert_called_once_with(backing, new_size, disk_type)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
def test_accept_transfer(self, get_volume_group_folder, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
dc = mock.sentinel.dc
vops.get_dc.return_value = dc
new_folder = mock.sentinel.new_folder
get_volume_group_folder.return_value = new_folder
context = mock.sentinel.context
volume = self._create_volume_obj()
new_project = mock.sentinel.new_project
self._driver.accept_transfer(context, volume, mock.sentinel.new_user,
new_project)
vops.get_backing.assert_called_once_with(volume.name, volume.id)
vops.get_dc.assert_called_once_with(backing)
get_volume_group_folder.assert_called_once_with(dc, new_project)
vops.move_backing_to_folder.assert_called_once_with(backing,
new_folder)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_revert_to_snapshot_with_no_backing(self, vops):
vops.get_backing.return_value = None
volume = self._create_volume_obj()
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume)
self._driver.revert_to_snapshot(
mock.sentinel.context, volume, snapshot)
vops.get_backing.assert_called_once_with(volume.name, volume.id)
vops.revert_to_snapshot.assert_not_called()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_revert_to_snapshot_template_format(self, vops):
volume = self._create_volume_obj()
loc = '/test-dc/foo'
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume,
provider_location=loc)
self.assertRaises(cinder_exceptions.InvalidSnapshot,
self._driver.revert_to_snapshot,
mock.sentinel.context,
volume,
snapshot)
vops.revert_to_snapshot.assert_not_called()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_revert_to_snapshot(self, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_obj()
snapshot = fake_snapshot.fake_snapshot_obj(self._context,
volume=volume)
self._driver.revert_to_snapshot(
mock.sentinel.context, volume, snapshot)
vops.get_backing.assert_called_once_with(volume.name, volume.id)
vops.revert_to_snapshot.assert_called_once_with(backing,
snapshot.name)
@ddt.ddt
class ImageDiskTypeTest(test.TestCase):
"""Unit tests for ImageDiskType."""
@ddt.data('thin', 'preallocated', 'streamOptimized', 'sparse')
def test_is_valid(self, image_disk_type):
self.assertTrue(vmdk.ImageDiskType.is_valid(image_disk_type))
def test_is_valid_with_invalid_type(self):
self.assertFalse(vmdk.ImageDiskType.is_valid('thick'))
@ddt.data('thin', 'preallocated', 'streamOptimized', 'sparse')
def test_validate(self, image_disk_type):
vmdk.ImageDiskType.validate(image_disk_type)
def test_validate_with_invalid_type(self):
self.assertRaises(cinder_exceptions.ImageUnacceptable,
vmdk.ImageDiskType.validate,
"thick")
| 43.705097 | 79 | 0.662545 |
748ebce56d353cd40c4d6fe0e7b26b10e22de5d3 | 6,616 | py | Python | dreampylib.py | michaelndn/dreampylib | d1448007c45d41a6cd6bf4be5c23da4dc2c8dff3 | ["Apache-2.0"] | 1 | 2018-10-28T10:33:31.000Z | 2018-10-28T10:33:31.000Z | dreampylib.py | michaelndn/dreampylib | d1448007c45d41a6cd6bf4be5c23da4dc2c8dff3 | ["Apache-2.0"] | null | null | null | dreampylib.py | michaelndn/dreampylib | d1448007c45d41a6cd6bf4be5c23da4dc2c8dff3 | ["Apache-2.0"] | null | null | null |
# Dreampylib - version 1.0
# (c) 2009 by Laurens Simonis
# See licence.txt for licencing info
# updated 2013 by Michael Rodriguez
# UUID is needed to generate a nice random uuid for dreamhost
import uuid
import urllib, urllib2
DEBUG = False
defaultReturnType = 'dict'
class _RemoteCommand(object):
# some magic to catch arbitrary maybe non-existent func. calls
# supports "nested" methods (e.g. examples.getStateName)
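    # Illustration (added note; the method name below is only an example):
    # accessing connection.user.list_users builds a nested _RemoteCommand
    # named "user.list_users"; when it is called, the request's 'cmd' field
    # becomes "user-list_users", because __init__ stores name.replace('.', '-').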
def __init__(self, name, parent, url):
        # Store the name of the command
self._name = name
self._cmd = name.replace('.','-')
self._parent = parent
self._url = url
self._child = None
self._resultKeys = []
self._status = ""
self._resultDict = []
self._resultList = []
def Status(self):
if self._child:
return self._child.Status()
else:
return self._status
def ResultKeys(self):
if self._child:
return self._child.ResultKeys()
else:
return self._resultKeys
def ResultList(self):
if self._child:
return self._child.ResultList()
else:
return self._resultList
def ResultDict(self):
if self._child:
return self._child.ResultDict()
else:
return self._resultDict
def __getattr__(self, name):
self._child = _RemoteCommand("%s.%s" % (self._name, name), self._parent, self._url)
return self._child
def __call__(self, returnType = None, *args, **kwargs):
if DEBUG:
print "Called %s(%s)" % (self._name, str(kwargs))
if self._parent.IsConnected():
request = {}
request.update(kwargs)
request.update(self._parent._GetUserData())
request['cmd'] = self._cmd
request['unique_id'] = str(uuid.uuid4())
if DEBUG:
print request
self._connection = urllib2.urlopen(self._url, urllib.urlencode(request))
return self._ParseResult(returnType or defaultReturnType)
else:
return []
def _ParseResult(self, returnType):
'''Parse the result of the request'''
lines = [l.strip() for l in self._connection.readlines()]
self._status = lines[0]
if self._status == 'success':
self._resultKeys = keys = lines[1].split('\t')
table = []
for resultLine in lines[2:]:
values = resultLine.split('\t')
self._resultDict.append(dict(zip(keys,values)))
if len(values) == 1:
self._resultList.append(values[0])
else:
self._resultList.append(values)
if returnType == 'list':
table = self._resultList
else:
table = self._resultDict
if DEBUG:
for t in table:
print t
return table
else:
if DEBUG:
print 'ERROR with %s: %s - %s' % (self._name, lines[0], lines[1])
self._status = '%s: %s - %s' % (self._name, lines[0], lines[1])
return False, lines[0], lines[1]
class DreampyLib(object):
def __init__(self, user=None, key=None, url = 'https://api.dreamhost.com'):
'''Initialises the connection to the dreamhost API.'''
self._user = user
self._key = key
self._url = url
self._lastCommand = None
self._connected = False
self._availableCommands = []
if user and key:
self.Connect()
def Connect(self, user = None, key = None, url = None):
if user:
self._user = user
if key:
self._key = key
if url:
self._url = url
self._connected = True
self._availableCommands = self.api.list_accessible_cmds(returnType = 'list')
self._connected = True if self._availableCommands[0] != False else False
if not self._connected:
self._availableCommands = []
return False
return True
def AvailableCommands(self):
return self._availableCommands
def IsConnected(self):
return self._connected
def ResultKeys(self):
if not self._lastCommand:
return []
else:
return self._lastCommand.ResultKeys()
def ResultList(self):
if not self._lastCommand:
return []
else:
return self._lastCommand.ResultList()
def ResultDict(self):
if not self._lastCommand:
return []
else:
return self._lastCommand.ResultDict()
def Status(self):
if not self._lastCommand:
return None
else:
return self._lastCommand.Status()
def _GetUserData(self):
return { 'username': self._user,
'key': self._key,
}
def __getattr__(self,name):
self._lastCommand = _RemoteCommand(name, self, self._url)
return self._lastCommand
def dir(self):
self.api.list_accessible_cmds()
if __name__ == '__main__':
# Dreamhost test API account:
user = 'apitest@dreamhost.com'
key = '6SHU5P2HLDAYECUM'
# Set this to true to enable debugging
DEBUG = False
# Specify the default returntype.
# Can be either 'dict' or 'list'
defaultReturnType = 'dict'
# Initialize the library and open a connection
connection = DreampyLib(user,key)
# If the connection is up, do some tests.
if connection.IsConnected():
# For instance, list the available commands:
print 'Available commands:\n ',
listOfCommands = connection.AvailableCommands()
print '\n '.join(listOfCommands)
# Even if defaultReturnType is 'dict', you can get the last result as a list, too.
print type(connection.dreamhost_ps.list_size_history(ps = 'ps7093'))
print type(connection.ResultList())
#print connection.mysql.list_dbs()
else:
print "Error connecting!"
print connection.Status()
| 29.145374 | 91 | 0.53416 |
9427f8153b079c6d85d57e14059db811942f0647 | 8,085 | py | Python | optics/work_407/work_407_window.py | Zelenyy/python-optics-lab | 01c40f185c9de06ccdb907370df3d35403527336 | ["MIT"] | 4 | 2020-04-07T06:21:40.000Z | 2020-05-20T17:07:18.000Z | optics/work_407/work_407_window.py | TopCoder2K/python-optics-lab | 0c31777f3cd49ed47991381fafe502e1d65816e9 | ["MIT"] | null | null | null | optics/work_407/work_407_window.py | TopCoder2K/python-optics-lab | 0c31777f3cd49ed47991381fafe502e1d65816e9 | ["MIT"] | 1 | 2020-04-22T06:46:29.000Z | 2020-04-22T06:46:29.000Z |
import numpy as np
from PyQt5.QtWidgets import QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QLayout, QGroupBox, QCheckBox, QSpinBox, \
QFormLayout, QPushButton, QDoubleSpinBox, QLabel
from matplotlib.backends.qt_compat import is_pyqt5
from matplotlib.figure import Figure
from matplotlib.ticker import MultipleLocator
from scipy.stats import multivariate_normal
from optics.load_setup import load_setup
from optics.optics_item import Scatter, Polaroid
from optics.work_407.electric_field import ElectricField, EFState
from optics.work_407.work_407_physics import PokkelsIC
from optics.work_407.work_407_setup import Setup407
from optics.work_407.work_407_zero import AnalyzerZero
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas)
class CosScatter(Scatter):
def distribution(self, phi, theta):
return np.cos(3*theta) ** 2
class GaussScatter(Scatter):
def __init__(self):
self.multynorm = multivariate_normal([0, 0], [20, 20])
self.x = np.arange(0.5, 200, 0.5)
self.X, self.Y = np.meshgrid(self.x, self.x)
def distribution(self, phi, theta):
pos = np.empty(self.X.shape + (2,))
pos[:, :, 0] = self.X
pos[:, :, 1] = self.Y
ampl = self.multynorm.pdf(pos)
ampl /= ampl.max()
return ampl
class Lab407Widget(QWidget):
analyzer_zero = None
def __init__(self):
super().__init__()
self.hbox = QHBoxLayout()
self.vbox = QVBoxLayout()
self.setLayout(self.hbox)
# self.setup = Setup407()
self.setup = load_setup(Setup407)
self.image_calculator = PokkelsIC(self.setup)
self.initSetup(self.vbox)
self.initMPLWidget(self.hbox)
self.initCrystal(self.vbox)
self.initScatter(self.vbox)
self.initPolaroid(self.vbox)
self.initField(self.vbox)
self.initDiod(self.vbox)
self.hbox.addLayout(self.vbox)
self.vbox.addStretch()
self.updateCanvas = self.updateCanvasView
self.updateCanvas()
def initSetup(self, layout):
text = [
"Длинна волны: {:.2f} нм".format(self.setup.lambda_light*1e6),
"Длинная кристалла: {:.1f} мм".format(self.setup.crystal_length),
"Растояние от кристала до экрана: {:d} см".format(int(self.setup.length/10)),
"Показатель преломления обыкновенной волны: {:.5f}".format(self.setup.n_ordinary),
"Полная длинна картины {} мм".format(self.image_calculator.full_size)
]
layout.addWidget(QLabel("\n".join(text)))
def initField(self, layout):
self.field = ElectricField(9, EFState.DC)
self.field_checkbox = QCheckBox("Включить блок питания")
input = QDoubleSpinBox()
input.setRange(0, self.field.U)
input.setSingleStep(0.1)
self.field.U = 0
def set_field(state):
input.setEnabled(state)
if state:
self.image_calculator.field = self.field
else:
self.image_calculator.field = None
self.updateCanvas()
self.field_checkbox.stateChanged.connect(set_field)
def set_u(u:float):
self.field.U = u
self.updateCanvas()
input.valueChanged.connect(set_u)
gr_box = QGroupBox("Блок питания")
form = QFormLayout()
form.addRow(self.field_checkbox)
form.addRow("Установить напряжение, В: ", input)
gr_box.setLayout(form)
layout.addWidget(gr_box)
def initDiod(self, layout):
ch_box = QCheckBox("Установить диод")
def set_diod(state):
self.field_checkbox.stateChanged.emit(True)
self.field_checkbox.setDisabled(state)
self.crys_box.setDisabled(state)
self.sca_box.setDisabled(state)
if state:
# self.cha
self.image_calculator.diod = True
self.updateCanvas = self.updateCanvasOscilogramm
self.field.state = EFState.AC
else:
self.image_calculator.diod = None
self.updateCanvas = self.updateCanvasView
self.field.state = EFState.DC
self.updateCanvas()
ch_box.stateChanged.connect(set_diod)
layout.addWidget(ch_box)
def initCrystal(self, layout):
self.crys_box = QCheckBox("Установить кристал")
def set_crystall(state):
if state:
self.image_calculator.crystal = True
else:
self.image_calculator.crystal = None
self.updateCanvas()
self.crys_box.stateChanged.connect(set_crystall)
layout.addWidget(self.crys_box)
def initScatter(self, layout: QLayout):
cos_scatter = CosScatter()
gauss_scatter = GaussScatter()
self.image_calculator.scatter = gauss_scatter
sca_box = QCheckBox("Установить рассеивающую пластинку")
self.sca_box = sca_box
def change_state(state: bool):
if state:
self.image_calculator.scatter = cos_scatter
else:
self.image_calculator.scatter = gauss_scatter
self.updateCanvas()
sca_box.stateChanged.connect(change_state)
layout.addWidget(sca_box)
def initPolaroid(self, layout):
polaroid = Polaroid(self.setup.polaroid_zero)
btn = QPushButton("Поиск нуля анализатора")
def open_analyzer_zero(push):
if self.analyzer_zero is None:
self.analyzer_zero = AnalyzerZero(self.setup)
self.analyzer_zero.show()
btn.clicked.connect(open_analyzer_zero)
gr_box = QGroupBox("Анализатор")
ch_box = QCheckBox("Установить анализатор")
input = QSpinBox()
input.setRange(0, 360)
input.setValue(polaroid.position)
input.setDisabled(True)
form = QFormLayout()
form.addRow(btn)
form.addRow(ch_box)
form.addRow("Угол, градусы: ", input)
gr_box.setLayout(form)
def change_angle(angle: int):
polaroid.position = angle
self.updateCanvas()
input.valueChanged.connect(change_angle)
def change_state(state: bool):
if state:
self.image_calculator.polaroid = polaroid
else:
self.image_calculator.polaroid = None
input.setEnabled(state)
self.updateCanvas()
ch_box.stateChanged.connect(change_state)
layout.addWidget(gr_box)
def initMPLWidget(self, layout):
vbox = QVBoxLayout()
self.canvas = FigureCanvas(Figure(figsize=(10, 10)))
self.ax = self.canvas.figure.subplots()
# vbox.addWidget(NavigationToolbar(self.canvas, self))
vbox.addWidget(self.canvas)
layout.addLayout(vbox)
def updateCanvas(self):
pass
def updateCanvasView(self):
self.ax.clear()
ampl = self.image_calculator.calculate()
img = self.ax.matshow(ampl, cmap="Greys_r", vmin=0, vmax=1)
# self.canvas.figure.colorbar(img)
self.ax.set_axis_off()
self.ax.figure.canvas.draw()
def updateCanvasOscilogramm(self):
self.ax.clear()
ampl = self.image_calculator.calculate()
field = self.field.value()
img = self.ax.scatter(field, 10*ampl, marker=".")
# self.canvas.figure.colorbar(img)
self.ax.grid(True)
self.ax.set_xlim(0,9, auto = True)
self.ax.set_ylim(0,10, auto = True)
self.ax.xaxis.set_minor_locator(MultipleLocator(5))
self.ax.yaxis.set_minor_locator(MultipleLocator(5))
# self.ax.set_axis_off()
self.ax.figure.canvas.draw()
class Lab407Window(QMainWindow):
def __init__(self, name):
super().__init__()
self.central = Lab407Widget()
self.setCentralWidget(self.central)
self.setWindowTitle(name)
| 33 | 118 | 0.628077 |
93da62791df4f3389e731cef910323fae565e417 | 20,357 | py | Python | Codes/Python32/Lib/http/cookies.py | eyantra/FireBird_Swiss_Knife | cac322cf28e2d690b86ba28a75e87551e5e47988 | ["MIT"] | 319 | 2016-09-22T15:54:48.000Z | 2022-03-18T02:36:58.000Z | Codes/Python32/Lib/http/cookies.py | eyantra/FireBird_Swiss_Knife | cac322cf28e2d690b86ba28a75e87551e5e47988 | ["MIT"] | 9 | 2016-11-03T21:56:41.000Z | 2020-08-09T19:27:37.000Z | Codes/Python32/Lib/http/cookies.py | eyantra/FireBird_Swiss_Knife | cac322cf28e2d690b86ba28a75e87551e5e47988 | ["MIT"] | 27 | 2016-10-06T16:05:32.000Z | 2022-03-18T02:37:00.000Z |
#!/usr/bin/env python3
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
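# Rough worked example of _quote (added for clarity; the sample values are
# illustrative, not part of the original module). A token made only of
# characters from _LegalChars passes through unchanged; anything else is
# wrapped in double quotes with the offending characters octal-escaped:
#   _quote('fig')         -> 'fig'
#   _quote('semi;colon')  -> '"semi\\073colon"'   (a literal backslash, 0, 7, 3)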
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
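# Illustration only (added comment): _unquote is the inverse of _quote, so
# '"semi\\073colon"' decodes back to 'semi;colon', and an escaped '\\012'
# inside a quoted value becomes a newline character.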
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
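# For example (illustrative; the exact value depends on the current time),
# _getdate(0) yields a string shaped like 'Thu, 01-Jan-2015 00:00:00 GMT',
# which is the "expires" format expected in a Set-Cookie header.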
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if any(c not in LegalChars for c in key):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value))
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key == "secure":
append(str(self._reserved[key]))
elif key == "httponly":
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
(?P<key> # Start of group 'key'
""" + _LegalCharsPatt + r"""+? # Any word of at least one letter
) # End of group 'key'
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
""" + _LegalCharsPatt + r"""* # Any word or empty string
) # End of group 'val'
\s*;? # Probably ending in a semi-colon
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if key[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[key[1:]] = value
elif key.lower() in Morsel._reserved:
if M:
M[key] = _unquote(value)
else:
rval, cval = self.value_decode(value)
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
| 35.589161 | 80 | 0.5462 |
f26c8e58cccd7591550c10ba23bb21ee124b1057 | 4,960 | py | Python | lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | ["Apache-2.0"] | 305 | 2019-09-14T17:16:05.000Z | 2022-03-31T15:05:20.000Z | lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | ["Apache-2.0"] | 410 | 2019-06-06T20:52:32.000Z | 2022-01-18T14:21:48.000Z | lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | ["Apache-2.0"] | 50 | 2019-05-10T21:12:24.000Z | 2022-01-21T06:39:47.000Z |
"""Test stepping over watchpoints."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestStepOverWatchpoint(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@expectedFailureAll(
oslist=["linux"],
archs=[
'aarch64',
'arm'],
bugnumber="llvm.org/pr26031")
# Read-write watchpoints not supported on SystemZ
@expectedFailureAll(archs=['s390x'])
@expectedFailureAll(oslist=["ios", "watchos", "tvos", "bridgeos"], bugnumber="<rdar://problem/34027183>") # watchpoint tests aren't working on arm64
@add_test_categories(["basic_process"])
def test(self):
"""Test stepping over watchpoints."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(self.target, VALID_TARGET)
lldbutil.run_break_set_by_symbol(self, 'main')
process = target.LaunchSimple(None, None,
self.get_process_working_directory())
self.assertTrue(process.IsValid(), PROCESS_IS_VALID)
self.assertEquals(process.GetState(), lldb.eStateStopped,
PROCESS_STOPPED)
thread = lldbutil.get_stopped_thread(process,
lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), "Failed to get thread.")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(frame.IsValid(), "Failed to get frame.")
read_value = frame.FindValue('g_watch_me_read',
lldb.eValueTypeVariableGlobal)
self.assertTrue(read_value.IsValid(), "Failed to find read value.")
error = lldb.SBError()
# resolve_location=True, read=True, write=False
read_watchpoint = read_value.Watch(True, True, False, error)
self.assertTrue(error.Success(),
"Error while setting watchpoint: %s" %
error.GetCString())
self.assertTrue(read_watchpoint, "Failed to set read watchpoint.")
thread.StepOver()
self.assertEquals(thread.GetStopReason(), lldb.eStopReasonWatchpoint,
STOPPED_DUE_TO_WATCHPOINT)
self.assertEquals(thread.GetStopDescription(20), 'watchpoint 1')
process.Continue()
self.assertEquals(process.GetState(), lldb.eStateStopped,
PROCESS_STOPPED)
self.assertEquals(thread.GetStopDescription(20), 'step over')
self.step_inst_for_watchpoint(1)
write_value = frame.FindValue('g_watch_me_write',
lldb.eValueTypeVariableGlobal)
self.assertTrue(write_value, "Failed to find write value.")
        # Most of the MIPS boards provide only one H/W watchpoint, and S/W
# watchpoints are not supported yet
arch = self.getArchitecture()
if re.match("^mips", arch) or re.match("powerpc64le", arch):
self.runCmd("watchpoint delete 1")
# resolve_location=True, read=False, write=True
write_watchpoint = write_value.Watch(True, False, True, error)
self.assertTrue(write_watchpoint, "Failed to set write watchpoint.")
self.assertTrue(error.Success(),
"Error while setting watchpoint: %s" %
error.GetCString())
thread.StepOver()
self.assertEquals(thread.GetStopReason(), lldb.eStopReasonWatchpoint,
STOPPED_DUE_TO_WATCHPOINT)
self.assertEquals(thread.GetStopDescription(20), 'watchpoint 2')
process.Continue()
self.assertEquals(process.GetState(), lldb.eStateStopped,
PROCESS_STOPPED)
self.assertEquals(thread.GetStopDescription(20), 'step over')
self.step_inst_for_watchpoint(2)
def step_inst_for_watchpoint(self, wp_id):
watchpoint_hit = False
current_line = self.frame().GetLineEntry().GetLine()
while self.frame().GetLineEntry().GetLine() == current_line:
self.thread().StepInstruction(False) # step_over=False
stop_reason = self.thread().GetStopReason()
if stop_reason == lldb.eStopReasonWatchpoint:
self.assertFalse(watchpoint_hit, "Watchpoint already hit.")
expected_stop_desc = "watchpoint %d" % wp_id
actual_stop_desc = self.thread().GetStopDescription(20)
self.assertEquals(actual_stop_desc, expected_stop_desc,
"Watchpoint ID didn't match.")
watchpoint_hit = True
else:
self.assertEquals(stop_reason, lldb.eStopReasonPlanComplete,
STOPPED_DUE_TO_STEP_IN)
self.assertTrue(watchpoint_hit, "Watchpoint never hit.")
| 41.333333 | 153 | 0.623589 |
dd43d33e8f1e58b523c9d7096339c814478b0217 | 38,246 | py | Python | FilmSetBuilder/__init__.py | ajaybhaga/blender-addon-filmsetbuilder | 8f2572b230f01705fb3c249bb031d86540f4de83 | ["MIT"] | 4 | 2021-09-01T03:16:05.000Z | 2021-09-08T14:58:21.000Z | FilmSetBuilder/__init__.py | ajaybhaga/blender-addon-filmsetbuilder | 8f2572b230f01705fb3c249bb031d86540f4de83 | ["MIT"] | null | null | null | FilmSetBuilder/__init__.py | ajaybhaga/blender-addon-filmsetbuilder | 8f2572b230f01705fb3c249bb031d86540f4de83 | ["MIT"] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Design References:
# ------------------
# Add-on: Another Noise Tool - Suite (W.I.P.)
# Author: Jimmy Hazevoet 5/2017
#
# Add-on: Easy City Addon
# Author: Goeminne Colas, Barthel Xavier
#
bl_info = {
"name": "Film Set Builder",
"author": "Ajay Bhaga",
"version": (0, 0, 1),
"blender": (2, 90, 0),
"location": "View3D > UI panel",
"description": "Film Set Builder: Scene Generator",
"warning": "",
# "doc_url": "{BLENDER_MANUAL_URL}/addons/add_mesh/fsb_filmset.html",
"category": "Object",
}
if "bpy" in locals():
import importlib
importlib.reload(gen_set_fsb)
importlib.reload(mesh_ant_displace)
importlib.reload(ant_functions)
importlib.reload(ant_noise)
else:
from FilmSetBuilder import gen_set_fsb
from FilmSetBuilder import mesh_ant_displace
from FilmSetBuilder import ant_functions
from FilmSetBuilder import ant_noise
import bpy
from bpy.props import *
import os
import copy
from bpy.props import (
BoolProperty,
FloatProperty,
IntProperty,
StringProperty,
PointerProperty,
EnumProperty,
)
from .ant_functions import (
draw_ant_refresh,
draw_ant_main,
draw_ant_noise,
draw_ant_displace,
)
#
bpy.types.Scene.city_size = IntProperty(name="Size", default=20)
bpy.types.Scene.max_block_size = IntProperty(name="Block Size", default=7)
bpy.types.Scene.park_mean = FloatProperty(name="Proportion of parks", default=0.1, min=0.0, max=1.0)
bpy.types.Scene.height_mean = FloatProperty(name="Mean building height", default=30.0, min=10.0, max=100.0)
bpy.types.Scene.height_std = FloatProperty(name="Standard deviation building height", default=15.0, min=5.0, max=50.0)
bpy.types.Scene.path_size = IntProperty(name="Path Size", default=50, min=0)
bpy.types.Scene.camera_speed = IntProperty(name="Speed", default=3, min=1,max=5)
matrice=[]
def setMatrice(mat):
global matrice
matrice=copy.deepcopy(mat)
#
# class FilmSetBuilderPanel(bpy.types.Panel):
#
class FilmSetBuilderPanel(bpy.types.Panel):
bl_label = "Film Set Generator"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'City'
def draw(self, context):
layout = self.layout
layout.label(text="City Parameters:")
# split = layout.split()
# col = split.column(align=True)
# col.operator("mesh.primitive_plane_add", text="Plane", icon='MESH_PLANE')
# col.operator("mesh.primitive_torus_add", text="Torus", icon='MESH_TORUS')
scene = context.scene
row = layout.row()
row.prop(scene, 'city_size')
row.prop(scene, 'max_block_size')
row = layout.row()
row.prop(scene, 'park_mean')
row = layout.row()
row.prop(scene, 'height_mean')
row.prop(scene, 'height_std')
row = layout.row()
row.operator('city.generate')
row.operator('city.delete')
row = layout.row()
row.operator('city.day')
row.operator('city.night')
row = layout.row()
row.operator('city.cars')
row = layout.row()
layout.label(text="Camera Path Parameters:")
row = layout.row()
row.operator('city.camera_path')
row = layout.row()
row.prop(scene, 'path_size')
row.prop(scene,'camera_speed')
class OBJECT_OT_Day(bpy.types.Operator):
bl_idname = "city.day"
bl_label = "Day Light"
bl_description = "Set day light environment"
def execute(self,context):
print("lenmatrice : ",len(matrice))
floor_repartition.setDayLight(matrice)
return {'FINISHED'}
class OBJECT_OT_Night(bpy.types.Operator):
bl_idname = "city.night"
bl_label = "Night Light"
bl_description = "Set night light environment"
def execute(self,context):
floor_repartition.setNightLight(matrice)
return {'FINISHED'}
class OBJECT_OT_CameraPath(bpy.types.Operator):
bl_idname = "city.camera_path"
bl_label = "Generate Camera Path"
bl_description = "generate a camera path though the city"
def execute(self,context):
floor_repartition.cameraPath(matrice,bpy.context.scene.path_size,bpy.context.scene.camera_speed)
return {'FINISHED'}
class OBJECT_OT_Car(bpy.types.Operator):
bl_idname = "city.cars"
bl_label = "Cars"
bl_description = "Generate cars riding throught the city"
def execute(self,context):
directory = os.path.dirname(__file__)
carsfilepath = os.path.join(directory, "models/cars.blend")
with bpy.data.libraries.load(carsfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("car")]
cars = [obj for obj in bpy.data.objects if "car" in obj.name]
floor_repartition.carsAnim(matrice, cars)
return {'FINISHED'}
class OBJECT_OT_GenerateCity(bpy.types.Operator):
bl_idname = "city.generate"
bl_label = "Generate"
bl_description = "Generates the city based on the given parameters."
def execute(self, context):
directory = os.path.dirname(__file__)
roadfilepath = os.path.join(directory, "models/road.blend")
with bpy.data.libraries.load(roadfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("road")]
buildingsfilepath = os.path.join(directory, "models/buildings.blend")
with bpy.data.libraries.load(buildingsfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if (name.startswith("building") or name.startswith("house"))]
parksfilepath = os.path.join(directory, "models/parks.blend")
with bpy.data.libraries.load(parksfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("park")]
urbanfilepath = os.path.join(directory, "models/urban.blend")
with bpy.data.libraries.load(urbanfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("street") or name.startswith("urban")]
worldfilepath = os.path.join(directory, "models/sky.blend")
with bpy.data.libraries.load(worldfilepath, link=True) as (data_from, data_to):
data_to.worlds = [name for name in data_from.worlds if name.startswith("myWorld")]
worldNightfilepath = os.path.join(directory, "models/skyNight.blend")
with bpy.data.libraries.load(worldNightfilepath, link=True) as (data_from, data_to):
data_to.worlds = [name for name in data_from.worlds if name.startswith("myWorld")]
scene = context.scene
# Remove previous city (if any)
bpy.ops.city.delete()
# Add an empty that will serve as the parent of all buildings
bpy.ops.object.add(type='EMPTY')
empty = bpy.context.object
empty.name = 'City'
# # Get the template objects (name starting with '_'
# objs = [obj for obj in bpy.data.objects if obj.name[0] == '_']
# # Get the mesh from the template object
# meshes = [obj.data for obj in objs]
size = scene.city_size
max_block_size = scene.max_block_size
park_mean = scene.park_mean
height_mean = scene.height_mean
height_std = scene.height_std
roads = { "straight": bpy.data.objects['roadStraight'],
"roadL": bpy.data.objects['roadL'],
"roadT": bpy.data.objects['roadT'],
"roadX": bpy.data.objects['roadX']}
buildings = [obj for obj in bpy.data.objects if ("building" in obj.name or "house" in obj.name)]
parks = [obj for obj in bpy.data.objects if "park" in obj.name]
cars = [obj for obj in bpy.data.objects if "car" in obj.name]
streetLamp=[obj for obj in bpy.data.objects if "street" in obj.name]
urbanObjects=[obj for obj in bpy.data.objects if "urban" in obj.name]
bpy.context.scene.render.engine = 'CYCLES'
mat=copy.deepcopy(floor_repartition.draw_roads_and_buildings(size, roads, buildings, max_block_size, parks, park_mean, height_mean, height_std))
setMatrice(mat)
floor_repartition.setDayLight(mat)
floor_repartition.setUrban(mat,streetLamp,urbanObjects)
# # Create a duplicate linked object of '_Building1'
# for x in np.linspace(-size/2, size/2, size):
# for y in np.linspace(-size/2, size/2, size):
# height = 2 + np.random.rand() * 8 # Random height
# mesh = meshes[np.random.random_integers(len(meshes))-1] # Random mesh from templates
# new_obj = bpy.data.objects.new('Building.000', mesh) # Create new object linked to same mesh data
# new_obj.location = (x*2,y*2,0) # Set its location
# new_obj.scale = (1,1,height) # Set its scale
# scene.objects.link(new_obj) # Link new object to scene
# new_obj.parent = empty # Link new object to empty
return {'FINISHED'}
class OBJECT_OT_DeleteCity(bpy.types.Operator):
bl_idname = "city.delete"
bl_label = "Delete"
def execute(self, context):
scene = context.scene
# Remove previous city
city = bpy.data.objects.get('City') # Get 'City' object
if not city is None: # If exists
bpy.ops.object.select_all(action='DESELECT') # Deselect all
city.select = True # Select City
bpy.ops.object.select_hierarchy(direction='CHILD', # Select all children of City
extend=True)
bpy.ops.object.select_hierarchy(direction='CHILD', extend=True)
bpy.ops.object.delete(use_global=False) # Delete selection
return {'FINISHED'}
#
# ------------------------------------------------------------
# Menu's and panels
def menu_func_eroder(self, context):
ob = bpy.context.active_object
if ob and (ob.fsb_filmset.keys() and not ob.fsb_filmset['sphere_mesh']):
self.layout.operator('mesh.eroder', text="Landscape Eroder", icon='SMOOTHCURVE')
def menu_func_landscape(self, context):
layout = self.layout
layout.separator()
self.layout.operator('mesh.filmset_generate', text="Film Set", icon="RNDCURVE")
# Landscape Add Panel
class FilmSetBuilderAddPanel(bpy.types.Panel):
bl_category = "Create"
bl_label = "Film Set Builder"
bl_idname = "ANTLANDSCAPE_PT_add"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_context = "objectmode"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
col = self.layout.column()
col.operator('mesh.filmset_generate', text="Generate Film Set", icon="WORLD")
col.operator('mesh.actors_generate', text="Generate Actors", icon="OBJECT_DATA")
col.operator('mesh.cameras_generate', text="Generate Cameras", icon="CAMERA_DATA")
col.operator('mesh.paths_generate', text="Generate Paths", icon="ANIM_DATA")
col.operator('mesh.lights_generate', text="Generate Lights", icon="LIGHT_DATA")
# Landscape Tools:
class AntLandscapeToolsPanel(bpy.types.Panel):
bl_category = "Create"
bl_label = "Film Set Builder Tools"
bl_idname = "ANTLANDSCAPE_PT_tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_context = "objectmode"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
ob = bpy.context.active_object
return (ob and ob.type == 'MESH')
def draw(self, context):
layout = self.layout
ob = context.active_object
col = layout.column()
col.operator('mesh.ant_displace', text="Randomize Actors", icon="OBJECT_DATA")
col.operator('mesh.ant_displace', text="Randomize Environment", icon="SCENE_DATA")
col.operator('mesh.ant_slope_map', icon='GROUP_VERTEX')
if ob.fsb_filmset.keys() and not ob.fsb_filmset['sphere_mesh']:
col.operator('mesh.eroder', text="Landscape Eroder", icon='SMOOTHCURVE')
# Film Set Settings
class FilmSetSettingsPanel(bpy.types.Panel):
bl_category = "Create"
bl_label = "Film Set Settings"
bl_idname = "ANTLANDSCAPE_PT_noise"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
ob = bpy.context.active_object
return ob.fsb_filmset.keys() if ob else False
def draw(self, context):
layout = self.layout
scene = context.scene
ob = bpy.context.active_object
ant = ob.fsb_filmset
box = layout.box()
col = box.column(align=True)
col.scale_y = 1.5
if ant.sphere_mesh:
col.operator('mesh.fsb_filmset_regenerate', text="Regenerate", icon="LOOP_FORWARDS")
else:
col.operator('mesh.fsb_filmset_refresh', text="Refresh", icon="FILE_REFRESH")
box.prop(ant, "noise_type")
if ant.noise_type == "blender_texture":
box.prop_search(ant, "texture_block", bpy.data, "textures")
else:
box.prop(ant, "basis_type")
col = box.column(align=True)
col.prop(ant, "random_seed")
col = box.column(align=True)
col.prop(ant, "noise_offset_x")
col.prop(ant, "noise_offset_y")
if ant.sphere_mesh:
col.prop(ant, "noise_offset_z")
col.prop(ant, "noise_size_x")
col.prop(ant, "noise_size_y")
if ant.sphere_mesh:
col.prop(ant, "noise_size_z")
col = box.column(align=True)
col.prop(ant, "noise_size")
col = box.column(align=True)
if ant.noise_type == "multi_fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
elif ant.noise_type == "ridged_multi_fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "gain")
elif ant.noise_type == "hybrid_multi_fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "gain")
elif ant.noise_type == "hetero_terrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
elif ant.noise_type == "fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
elif ant.noise_type == "turbulence_vector":
col.prop(ant, "noise_depth")
col.prop(ant, "amplitude")
col.prop(ant, "frequency")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "variable_lacunarity":
box.prop(ant, "vl_basis_type")
box.prop(ant, "distortion")
elif ant.noise_type == "marble_noise":
box.prop(ant, "marble_shape")
box.prop(ant, "marble_bias")
box.prop(ant, "marble_sharp")
col = box.column(align=True)
col.prop(ant, "distortion")
col.prop(ant, "noise_depth")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "shattered_hterrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion")
elif ant.noise_type == "strata_hterrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion", text="Strata")
elif ant.noise_type == "ant_turbulence":
col.prop(ant, "noise_depth")
col.prop(ant, "amplitude")
col.prop(ant, "frequency")
col.prop(ant, "distortion")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "vl_noise_turbulence":
col.prop(ant, "noise_depth")
col.prop(ant, "amplitude")
col.prop(ant, "frequency")
col.prop(ant, "distortion")
col.separator()
box.prop(ant, "vl_basis_type")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "vl_hTerrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion")
col.separator()
box.prop(ant, "vl_basis_type")
elif ant.noise_type == "distorted_heteroTerrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion")
col.separator()
col.prop(ant, "vl_basis_type")
elif ant.noise_type == "double_multiFractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "gain")
col.separator()
box.prop(ant, "vl_basis_type")
elif ant.noise_type == "rocks_noise":
col.prop(ant, "noise_depth")
col.prop(ant, "distortion")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "slick_rock":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "gain")
col.prop(ant, "offset")
col.prop(ant, "distortion")
col.separator()
box.prop(ant, "vl_basis_type")
elif ant.noise_type == "planet_noise":
col.prop(ant, "noise_depth")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
# Effects mix
col = box.column(align=False)
box.prop(ant, "fx_type")
if ant.fx_type != "0":
if int(ant.fx_type) <= 12:
box.prop(ant, "fx_bias")
box.prop(ant, "fx_mix_mode")
col = box.column(align=True)
col.prop(ant, "fx_mixfactor")
col = box.column(align=True)
col.prop(ant, "fx_loc_x")
col.prop(ant, "fx_loc_y")
col.prop(ant, "fx_size")
col = box.column(align=True)
col.prop(ant, "fx_depth")
if ant.fx_depth != 0:
col.prop(ant, "fx_frequency")
col.prop(ant, "fx_amplitude")
col.prop(ant, "fx_turb")
col = box.column(align=True)
row = col.row(align=True).split(factor=0.92, align=True)
row.prop(ant, "fx_height")
row.prop(ant, "fx_invert", toggle=True, text="", icon='ARROW_LEFTRIGHT')
col.prop(ant, "fx_offset")
# ------------------------------------------------------------
# Properties group
class AntLandscapePropertiesGroup(bpy.types.PropertyGroup):
ant_terrain_name: StringProperty(
name="Name",
default="Landscape"
)
land_material: StringProperty(
name='Material',
default="",
description="Terrain material"
)
water_material: StringProperty(
name='Material',
default="",
description="Water plane material"
)
texture_block: StringProperty(
name="Texture",
default=""
)
at_cursor: BoolProperty(
name="Cursor",
default=True,
description="Place at cursor location",
)
smooth_mesh: BoolProperty(
name="Smooth",
default=True,
description="Shade smooth"
)
tri_face: BoolProperty(
name="Triangulate",
default=False,
description="Triangulate faces"
)
sphere_mesh: BoolProperty(
name="Sphere",
default=False,
description="Generate uv sphere - remove doubles when ready"
)
subdivision_x: IntProperty(
name="Subdivisions X",
default=128,
min=4,
max=6400,
description="Mesh X subdivisions"
)
subdivision_y: IntProperty(
default=128,
name="Subdivisions Y",
min=4,
max=6400,
description="Mesh Y subdivisions"
)
mesh_size: FloatProperty(
default=2.0,
name="Mesh Size",
min=0.01,
max=100000.0,
description="Mesh size"
)
mesh_size_x: FloatProperty(
default=2.0,
name="Mesh Size X",
min=0.01,
description="Mesh x size"
)
mesh_size_y: FloatProperty(
name="Mesh Size Y",
default=2.0,
min=0.01,
description="Mesh y size"
)
random_seed: IntProperty(
name="Random Seed",
default=0,
min=0,
description="Randomize noise origin"
)
noise_offset_x: FloatProperty(
name="Offset X",
default=0.0,
description="Noise X Offset"
)
noise_offset_y: FloatProperty(
name="Offset Y",
default=0.0,
description="Noise Y Offset"
)
noise_offset_z: FloatProperty(
name="Offset Z",
default=0.0,
description="Noise Z Offset"
)
noise_size_x: FloatProperty(
default=1.0,
name="Size X",
min=0.01,
max=1000.0,
description="Noise x size"
)
noise_size_y: FloatProperty(
name="Size Y",
default=1.0,
min=0.01,
max=1000.0,
description="Noise y size"
)
noise_size_z: FloatProperty(
name="Size Z",
default=1.0,
min=0.01,
max=1000.0,
description="Noise Z size"
)
noise_size: FloatProperty(
name="Noise Size",
default=1.0,
min=0.01,
max=1000.0,
description="Noise size"
)
noise_type: EnumProperty(
name="Noise Type",
default='hetero_terrain',
description="Noise type",
items = [
('multi_fractal', "Multi Fractal", "Blender: Multi Fractal algorithm", 0),
('ridged_multi_fractal', "Ridged MFractal", "Blender: Ridged Multi Fractal", 1),
('hybrid_multi_fractal', "Hybrid MFractal", "Blender: Hybrid Multi Fractal", 2),
('hetero_terrain', "Hetero Terrain", "Blender: Hetero Terrain", 3),
('fractal', "fBm Fractal", "Blender: fBm - Fractional Browninian motion", 4),
('turbulence_vector', "Turbulence", "Blender: Turbulence Vector", 5),
('variable_lacunarity', "Distorted Noise", "Blender: Distorted Noise", 6),
('marble_noise', "Marble", "A.N.T.: Marble Noise", 7),
('shattered_hterrain', "Shattered hTerrain", "A.N.T.: Shattered hTerrain", 8),
('strata_hterrain', "Strata hTerrain", "A.N.T: Strata hTerrain", 9),
('ant_turbulence', "Another Noise", "A.N.T: Turbulence variation", 10),
('vl_noise_turbulence', "vlNoise turbulence", "A.N.T: Real vlNoise turbulence", 11),
('vl_hTerrain', "vlNoise hTerrain", "A.N.T: vlNoise hTerrain", 12),
('distorted_heteroTerrain', "Distorted hTerrain", "A.N.T distorted hTerrain", 13),
('double_multiFractal', "Double MultiFractal", "A.N.T: double multiFractal", 14),
('rocks_noise', "Noise Rocks", "A.N.T: turbulence variation", 15),
('slick_rock', "Slick Rock", "A.N.T: slick rock", 16),
('planet_noise', "Planet Noise", "Planet Noise by: Farsthary", 17),
('blender_texture', "Blender Texture - Texture Nodes", "Blender texture data block", 18)]
)
basis_type: EnumProperty(
name="Noise Basis",
default=ant_noise.noise_basis_default,
description="Noise basis algorithms",
items = ant_noise.noise_basis
)
vl_basis_type: EnumProperty(
name="vlNoise Basis",
default=ant_noise.noise_basis_default,
description="VLNoise basis algorithms",
items = ant_noise.noise_basis
)
distortion: FloatProperty(
name="Distortion",
default=1.0,
min=0.01,
max=100.0,
description="Distortion amount"
)
hard_noise: EnumProperty(
name="Soft Hard",
default="0",
description="Soft Noise, Hard noise",
items = [
("0", "Soft", "Soft Noise", 0),
("1", "Hard", "Hard noise", 1)]
)
noise_depth: IntProperty(
name="Depth",
default=8,
min=0,
max=16,
description="Noise Depth - number of frequencies in the fBm"
)
amplitude: FloatProperty(
name="Amp",
default=0.5,
min=0.01,
max=1.0,
description="Amplitude"
)
frequency: FloatProperty(
name="Freq",
default=2.0,
min=0.01,
max=5.0,
description="Frequency"
)
dimension: FloatProperty(
name="Dimension",
default=1.0,
min=0.01,
max=2.0,
description="H - fractal dimension of the roughest areas"
)
lacunarity: FloatProperty(
name="Lacunarity",
min=0.01,
max=6.0,
default=2.0,
description="Lacunarity - gap between successive frequencies"
)
offset: FloatProperty(
name="Offset",
default=1.0,
min=0.01,
max=6.0,
description="Offset - raises the terrain from sea level"
)
gain: FloatProperty(
name="Gain",
default=1.0,
min=0.01,
max=6.0,
description="Gain - scale factor"
)
marble_bias: EnumProperty(
name="Bias",
default="0",
description="Marble bias",
items = [
("0", "Sin", "Sin", 0),
("1", "Cos", "Cos", 1),
("2", "Tri", "Tri", 2),
("3", "Saw", "Saw", 3)]
)
marble_sharp: EnumProperty(
name="Sharp",
default="0",
description="Marble sharpness",
items = [
("0", "Soft", "Soft", 0),
("1", "Sharp", "Sharp", 1),
("2", "Sharper", "Sharper", 2),
("3", "Soft inv.", "Soft", 3),
("4", "Sharp inv.", "Sharp", 4),
("5", "Sharper inv.", "Sharper", 5)]
)
marble_shape: EnumProperty(
name="Shape",
default="0",
description="Marble shape",
items= [
("0", "Default", "Default", 0),
("1", "Ring", "Ring", 1),
("2", "Swirl", "Swirl", 2),
("3", "Bump", "Bump", 3),
("4", "Wave", "Wave", 4),
("5", "Z", "Z", 5),
("6", "Y", "Y", 6),
("7", "X", "X", 7)]
)
height: FloatProperty(
name="Height",
default=0.5,
min=-10000.0,
max=10000.0,
description="Noise intensity scale"
)
height_invert: BoolProperty(
name="Invert",
default=False,
description="Height invert",
)
height_offset: FloatProperty(
name="Offset",
default=0.0,
min=-10000.0,
max=10000.0,
description="Height offset"
)
fx_mixfactor: FloatProperty(
name="Mix Factor",
default=0.0,
min=-1.0,
max=1.0,
description="Effect mix factor: -1.0 = Noise, +1.0 = Effect"
)
fx_mix_mode: EnumProperty(
name="Effect Mix",
default="0",
description="Effect mix mode",
items = [
("0", "Mix", "Mix", 0),
("1", "Add", "Add", 1),
("2", "Sub", "Subtract", 2),
("3", "Mul", "Multiply", 3),
("4", "Abs", "Absolute", 4),
("5", "Scr", "Screen", 5),
("6", "Mod", "Modulo", 6),
("7", "Min", "Minimum", 7),
("8", "Max", "Maximum", 8)
]
)
fx_type: EnumProperty(
name="Effect Type",
default="0",
description="Effect type",
items = [
("0", "None", "No effect", 0),
("1", "Gradient", "Gradient", 1),
("2", "Waves", "Waves - Bumps", 2),
("3", "Zigzag", "Zigzag", 3),
("4", "Wavy", "Wavy", 4),
("5", "Bump", "Bump", 5),
("6", "Dots", "Dots", 6),
("7", "Rings", "Rings", 7),
("8", "Spiral", "Spiral", 8),
("9", "Square", "Square", 9),
("10", "Blocks", "Blocks", 10),
("11", "Grid", "Grid", 11),
("12", "Tech", "Tech", 12),
("13", "Crackle", "Crackle", 13),
("14", "Cracks", "Cracks", 14),
("15", "Rock", "Rock noise", 15),
("16", "Lunar", "Craters", 16),
("17", "Cosine", "Cosine", 17),
("18", "Spikey", "Spikey", 18),
("19", "Stone", "Stone", 19),
("20", "Flat Turb", "Flat turbulence", 20),
("21", "Flat Voronoi", "Flat voronoi", 21)
]
)
fx_bias: EnumProperty(
name="Effect Bias",
default="0",
description="Effect bias type",
items = [
("0", "Sin", "Sin", 0),
("1", "Cos", "Cos", 1),
("2", "Tri", "Tri", 2),
("3", "Saw", "Saw", 3),
("4", "None", "None", 4)]
)
fx_turb: FloatProperty(
name="Distortion",
default=0.0,
min=0.0,
max=1000.0,
description="Effect turbulence distortion"
)
fx_depth: IntProperty(
name="Depth",
default=0,
min=0,
max=16,
description="Effect depth - number of frequencies"
)
fx_amplitude: FloatProperty(
name="Amp",
default=0.5,
min=0.01,
max=1.0,
description="Amplitude"
)
fx_frequency: FloatProperty(
name="Freq",
default=2.0,
min=0.01,
max=5.0,
description="Frequency"
)
fx_size: FloatProperty(
name="Effect Size",
default=1.0,
min=0.01,
max=1000.0,
description="Effect size"
)
fx_loc_x: FloatProperty(
name="Offset X",
default=0.0,
description="Effect x offset"
)
fx_loc_y: FloatProperty(
name="Offset Y",
default=0.0,
description="Effect y offset"
)
fx_height: FloatProperty(
name="Intensity",
default=1.0,
min=-1000.0,
max=1000.0,
description="Effect intensity scale"
)
fx_invert: BoolProperty(
name="Invert",
default=False,
description="Effect invert"
)
fx_offset: FloatProperty(
name="Offset",
default=0.0,
min=-1000.0,
max=1000.0,
description="Effect height offset"
)
edge_falloff: EnumProperty(
name="Falloff",
default="3",
description="Flatten edges",
items = [
("0", "None", "None", 0),
("1", "Y", "Y Falloff", 1),
("2", "X", "X Falloff", 2),
("3", "X Y", "X Y Falloff", 3)]
)
falloff_x: FloatProperty(
name="Falloff X",
default=4.0,
min=0.1,
max=100.0,
description="Falloff x scale"
)
falloff_y: FloatProperty(
name="Falloff Y",
default=4.0,
min=0.1,
max=100.0,
description="Falloff y scale"
)
edge_level: FloatProperty(
name="Edge Level",
default=0.0,
min=-10000.0,
max=10000.0,
description="Edge level, sealevel offset"
)
maximum: FloatProperty(
name="Maximum",
default=1.0,
min=-10000.0,
max=10000.0,
description="Maximum, flattens terrain at plateau level"
)
minimum: FloatProperty(
name="Minimum",
default=-1.0,
min=-10000.0,
max=10000.0,
description="Minimum, flattens terrain at seabed level"
)
vert_group: StringProperty(
name="Vertex Group",
default=""
)
strata: FloatProperty(
name="Amount",
default=5.0,
min=0.01,
max=1000.0,
description="Strata layers / terraces"
)
strata_type: EnumProperty(
name="Strata",
default="0",
description="Strata types",
items = [
("0", "None", "No strata", 0),
("1", "Smooth", "Smooth transitions", 1),
("2", "Sharp Sub", "Sharp subtract transitions", 2),
("3", "Sharp Add", "Sharp add transitions", 3),
("4", "Quantize", "Quantize", 4),
("5", "Quantize Mix", "Quantize mixed", 5)]
)
water_plane: BoolProperty(
name="Water Plane",
default=False,
description="Add water plane"
)
water_level: FloatProperty(
name="Level",
default=0.01,
min=-10000.0,
max=10000.0,
description="Water level"
)
remove_double: BoolProperty(
name="Remove Doubles",
default=False,
description="Remove doubles"
)
refresh: BoolProperty(
name="Refresh",
default=False,
description="Refresh"
)
auto_refresh: BoolProperty(
name="Auto",
default=True,
description="Automatic refresh"
)
# ------------------------------------------------------------
# Register:
classes = (
FilmSetBuilderAddPanel,
AntLandscapeToolsPanel,
FilmSetSettingsPanel,
AntLandscapePropertiesGroup,
gen_set_fsb.GenerateFilmSet,
gen_set_fsb.GenerateActors,
mesh_ant_displace.AntMeshDisplace,
ant_functions.FilmSetRefresh,
ant_functions.FilmSetRegenerate,
ant_functions.AntVgSlopeMap,
ant_functions.Eroder,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.VIEW3D_MT_mesh_add.append(menu_func_landscape)
bpy.types.Object.fsb_filmset = PointerProperty(type=AntLandscapePropertiesGroup, name="FSB_Filmset", description="Filmset properties")
bpy.types.VIEW3D_MT_paint_weight.append(menu_func_eroder)
def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)
bpy.types.VIEW3D_MT_mesh_add.remove(menu_func_landscape)
bpy.types.VIEW3D_MT_paint_weight.remove(menu_func_eroder)
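# A minimal access sketch (hypothetical; assumes the add-on is registered and an object
# is active): register() exposes the property group on every Object as obj.fsb_filmset
# through the PointerProperty above, so panels and operators can read per-object settings:
#
#     props = bpy.context.object.fsb_filmset
#     props.noise_type = 'hetero_terrain'
#     props.mesh_size = 4.0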
if __name__ == "__main__":
register()
| avg_line_length: 35.152574 | max_line_length: 152 | alphanum_fraction: 0.536762 |
| hexsha: de3f9a7f5790f0495788bf2dfad02e020d9c9b43 | size: 2,402 | ext: py | lang: Python |
| max_stars: plugin/CustomerSupportArchive/Lane_Diagnostics/tools/adc.py @ iontorrent/TS (7591590843c967435ee093a3ffe9a2c6dea45ed8) | licenses: ["Apache-2.0"] | stars: 125 | 2015-01-22T05:43:23.000Z to 2022-03-22T17:15:59.000Z |
| max_issues: plugin/CustomerSupportArchive/NucStepSpatialV2/tools/adc.py @ iontorrent/TS (7591590843c967435ee093a3ffe9a2c6dea45ed8) | licenses: ["Apache-2.0"] | issues: 59 | 2015-02-10T09:13:06.000Z to 2021-11-11T02:32:38.000Z |
| max_forks: plugin/CustomerSupportArchive/Lane_Diagnostics/tools/adc.py @ iontorrent/TS (7591590843c967435ee093a3ffe9a2c6dea45ed8) | licenses: ["Apache-2.0"] | forks: 98 | 2015-01-17T01:25:10.000Z to 2022-03-18T17:29:42.000Z |
"""
Contains helpful functions to unravel or ravel chip data between physical columns and per-adc quasi-columns.
"""
import numpy as np
def block_reshape( data, blocksize ):
''' Reshapes the 2D data into 3D data, with the 3rd dimension being adjacent pixels '''
rows, cols = data.shape
    numR = rows // blocksize[0]   # integer block counts (floor division keeps this Python 3 safe)
    numC = cols // blocksize[1]
return data.reshape(rows , numC , -1 ).transpose((1,0,2)).reshape(numC,numR,-1).transpose((1,0,2))
def unreshape( data, blocksize ):
''' Reverses the output of block_reshape '''
numR, numC, els = data.shape
rows = numR*blocksize[0]
cols = numC*blocksize[1]
return data.transpose((1,0,2)).reshape( numC, rows, -1 ).transpose((1,0,2)).reshape( rows, cols )
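# Illustrative round trip (an added sketch, not part of the original tool; assumes the
# array dimensions divide evenly by the block size): a 4x6 array with a (2, 3) block size
# becomes shape (2, 2, 6), the last axis holding each 2x3 block in row-major order.
#
#     >>> a = np.arange(24).reshape(4, 6)
#     >>> b = block_reshape(a, (2, 3))
#     >>> b[0, 0]
#     array([0, 1, 2, 6, 7, 8])
#     >>> np.array_equal(unreshape(b, (2, 3)), a)
#     True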
def im2adc_550( frame ):
''' Converts a 550 "datacollect" image (well layout) to an "ADC" image (pixel layout)'''
if frame.ndim == 3:
return np.array( [ im2adc_550(f) for f in frame.transpose((2,0,1)) ] ).transpose((1,2,0))
blocks = block_reshape( frame, (3,4) )
blocks1 = blocks[:,:,(0,4,1,6,2,3,8,9,5,10,7,11)]
return unreshape( blocks1, (2,6) )
def adc2im_550( frame ):
    ''' Converts a 550 "ADC" image (pixel layout) to a "datacollect" image (well layout) '''
if frame.ndim == 3:
return np.array( [ adc2im_550(f) for f in frame.transpose((2,0,1)) ] ).transpose((1,2,0))
blocks = block_reshape( frame, (2,6) )
blocks1 = blocks[:,:,(0,2,4,5,1,8,3,10,6,7,9,11)]
return unreshape( blocks1, (3,4) )
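# Note: the index tuples in im2adc_550 and adc2im_550 are inverse permutations of one
# another, so adc2im_550(im2adc_550(frame)) is expected to reproduce the original frame
# (assuming the frame dimensions are multiples of the 3x4 and 2x6 block sizes).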
def im2adc_550_mb( frame ):
    ''' This undoes Mark B's reshape function in /software/p2/dev.py:convertImage (real code is buried in assembly) '''
if frame.ndim == 3:
        return np.array( [ im2adc_550_mb(f) for f in frame.transpose((2,0,1)) ] ).transpose((1,2,0))
blocks = block_reshape( frame, (3,4) )
#blocks1 = blocks[:,:,(0,4,1,6,2,3,8,9,5,10,7,11)]
blocks1 = blocks[:,:,(4,0,1,2,6,3,8,9,5,10,7,11)]
return unreshape( blocks1, (2,6) )
def adc2im_550_mb( frame ):
    ''' This mimics Mark B's reshape function '''
if frame.ndim == 3:
        return np.array( [ adc2im_550_mb(f) for f in frame.transpose((2,0,1)) ] ).transpose((1,2,0))
blocks = block_reshape( frame, (2,6) )
#blocks1 = blocks[:,:,(0,2,4,5,1,8,3,10,6,7,9,11)]
#blocks1 = blocks[:,:,(1,2,4,5,0,8,3,10,6,7,9,11)]
blocks1 = blocks[:,:,(1,2,3,5,0,8,4,10,6,7,9,11)]
return unreshape( blocks1, (3,4) )
| avg_line_length: 42.140351 | max_line_length: 119 | alphanum_fraction: 0.618235 |
| hexsha: 0a83b0f192f8ae0b243051eb123c961cac04032e | size: 16,014 | ext: py | lang: Python |
| max_stars: volta/volta/train_utils.py @ e-bug/cross-modal-ablation (130288670e3d898179998c2d727c5b8e82e49b60) | licenses: ["MIT"] | stars: 17 | 2021-09-12T07:20:07.000Z to 2022-01-17T06:45:22.000Z |
| max_issues: volta/volta/train_utils.py @ e-bug/cross-modal-ablation (130288670e3d898179998c2d727c5b8e82e49b60) | licenses: ["MIT"] | issues: null | dates: null |
| max_forks: volta/volta/train_utils.py @ e-bug/cross-modal-ablation (130288670e3d898179998c2d727c5b8e82e49b60) | licenses: ["MIT"] | forks: 2 | 2021-10-08T12:04:16.000Z to 2022-03-09T08:30:40.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2020, Emanuele Bugliarello (@e-bug).
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import logging
from io import open
from tensorboardX import SummaryWriter
import torch
logger = logging.getLogger(__name__)
class tbLogger(object):
def __init__(self, log_dir, txt_dir, task_names, task_ids, task_num_iters,
gradient_accumulation_steps, save_logger=True, txt_name="out.txt"):
logger.info("logging file at: " + log_dir)
self.save_logger = save_logger
self.log_dir = log_dir
self.txt_dir = txt_dir
if self.save_logger:
self.logger = SummaryWriter(log_dir=log_dir)
self.txt_f = open(txt_dir + "/" + txt_name, "w")
self.task_id2name = {ids: name.replace("+", "plus") for ids, name in zip(task_ids, task_names)}
self.task_ids = task_ids
self.task_loss = {task_id: 0 for task_id in task_ids}
self.task_loss_tmp = {task_id: 0 for task_id in task_ids}
self.task_score_tmp = {task_id: 0 for task_id in task_ids}
self.task_norm_tmp = {task_id: 0 for task_id in task_ids}
self.task_step = {task_id: 0 for task_id in task_ids}
self.task_step_tmp = {task_id: 0 for task_id in task_ids}
self.task_num_iters = task_num_iters
self.epochId = 0
self.gradient_accumulation_steps = gradient_accumulation_steps
self.task_loss_val = {task_id: 0 for task_id in task_ids}
self.task_score_val = {task_id: 0 for task_id in task_ids}
self.task_step_val = {task_id: 0 for task_id in task_ids}
self.task_iter_val = {task_id: 0 for task_id in task_ids}
self.task_datasize_val = {task_id: 0 for task_id in task_ids}
self.masked_t_loss = {task_id: 0 for task_id in task_ids}
self.masked_v_loss = {task_id: 0 for task_id in task_ids}
self.next_sentense_loss = {task_id: 0 for task_id in task_ids}
self.masked_t_loss_val = {task_id: 0 for task_id in task_ids}
self.masked_v_loss_val = {task_id: 0 for task_id in task_ids}
self.next_sentense_loss_val = {task_id: 0 for task_id in task_ids}
def __getstate__(self):
d = dict(self.__dict__)
del d["logger"]
del d["txt_f"]
return d
def __setstate__(self, d):
self.__dict__.update(d)
if self.save_logger:
self.logger = SummaryWriter(log_dir=self.log_dir)
self.txt_f = open(self.txt_dir + "/" + "out.txt", "a")
def txt_close(self):
self.txt_f.close()
def linePlot(self, step, val, split, key, xlabel="None"):
if self.save_logger:
self.logger.add_scalar(split + "/" + key, val, step)
def step_train(self, epochId, stepId, loss, score, norm, task_id, split):
self.task_loss[task_id] += loss
self.task_loss_tmp[task_id] += loss
self.task_score_tmp[task_id] += score
self.task_norm_tmp[task_id] += norm
self.task_step[task_id] += self.gradient_accumulation_steps
self.task_step_tmp[task_id] += self.gradient_accumulation_steps
self.epochId = epochId
# plot on tensorboard.
self.linePlot(stepId, loss, split, self.task_id2name[task_id] + "_loss")
self.linePlot(stepId, score, split, self.task_id2name[task_id] + "_score")
self.linePlot(stepId, norm, split, self.task_id2name[task_id] + "_norm")
def step_train_CC(self, epochId, stepId, masked_loss_t, masked_loss_v, next_sentence_loss, norm, task_id, split):
self.masked_t_loss[task_id] += masked_loss_t
self.masked_v_loss[task_id] += masked_loss_v
self.next_sentense_loss[task_id] += next_sentence_loss
self.task_norm_tmp[task_id] += norm
self.task_step[task_id] += self.gradient_accumulation_steps
self.task_step_tmp[task_id] += self.gradient_accumulation_steps
self.epochId = epochId
# plot on tensorboard.
self.linePlot(stepId, masked_loss_t, split, self.task_id2name[task_id] + "_masked_loss_t")
self.linePlot(stepId, masked_loss_v, split, self.task_id2name[task_id] + "_masked_loss_v")
self.linePlot(stepId, next_sentence_loss, split, self.task_id2name[task_id] + "_next_sentence_loss")
def step_val(self, epochId, loss, score, task_id, batch_size, split):
self.task_loss_val[task_id] += loss * batch_size
self.task_score_val[task_id] += score
self.task_step_val[task_id] += self.gradient_accumulation_steps
self.task_datasize_val[task_id] += batch_size
def step_val_CC(self, epochId, masked_loss_t, masked_loss_v, next_sentence_loss, task_id, batch_size, split):
self.masked_t_loss_val[task_id] += masked_loss_t
self.masked_v_loss_val[task_id] += masked_loss_v
self.next_sentense_loss_val[task_id] += next_sentence_loss
self.task_step_val[task_id] += self.gradient_accumulation_steps
self.task_datasize_val[task_id] += batch_size
def showLossValAll(self):
progressInfo = "Eval Ep: %d " % self.epochId
lossInfo = "Validation "
val_scores = {}
ave_loss = 0
for task_id in self.task_ids:
loss = self.task_loss_val[task_id] / float(self.task_step_val[task_id])
score = self.task_score_val[task_id] / float(self.task_datasize_val[task_id])
val_scores[task_id] = score
ave_loss += loss
lossInfo += "[%s]: loss %.3f score %.3f " % (self.task_id2name[task_id], loss, score * 100.0)
self.linePlot(self.epochId, loss, "val", self.task_id2name[task_id] + "_loss")
self.linePlot(self.epochId, score, "val", self.task_id2name[task_id] + "_score")
self.task_loss_val = {task_id: 0 for task_id in self.task_loss_val}
self.task_score_val = {task_id: 0 for task_id in self.task_score_val}
self.task_datasize_val = {task_id: 0 for task_id in self.task_datasize_val}
self.task_step_val = {task_id: 0 for task_id in self.task_ids}
logger.info(progressInfo)
logger.info(lossInfo)
print(lossInfo, file=self.txt_f)
return val_scores
def getValScore(self, task_id):
return self.task_score_val[task_id] / float(self.task_datasize_val[task_id])
def showLossVal(self, task_id, task_stop_controller=None):
progressInfo = "Eval task %s on iteration %d " % (task_id, self.task_step[task_id])
lossInfo = "Validation "
ave_loss = 0
loss = self.task_loss_val[task_id] / float(self.task_datasize_val[task_id])
score = self.task_score_val[task_id] / float(self.task_datasize_val[task_id])
ave_loss += loss
lossInfo += "[%s]: loss %.3f score %.3f " % (self.task_id2name[task_id], loss, score * 100.0)
self.linePlot(self.task_step[task_id], loss, "val", self.task_id2name[task_id] + "_loss")
self.linePlot(self.task_step[task_id], score, "val", self.task_id2name[task_id] + "_score")
if task_stop_controller is not None:
self.linePlot(self.task_step[task_id], task_stop_controller[task_id].in_stop,
"val", self.task_id2name[task_id] + "_early_stop")
self.task_loss_val[task_id] = 0
self.task_score_val[task_id] = 0
self.task_datasize_val[task_id] = 0
self.task_step_val[task_id] = 0
logger.info(progressInfo)
logger.info(lossInfo)
print(lossInfo, file=self.txt_f)
return score
def showLossTrain(self):
# show the current loss, once showed, reset the loss.
lossInfo = ""
for task_id in self.task_ids:
if self.task_num_iters[task_id] > 0:
if self.task_step_tmp[task_id]:
lossInfo += (
"[%s]: iter %d Ep: %.2f loss %.3f score %.3f lr %.6g "
% (
self.task_id2name[task_id], self.task_step[task_id],
self.task_step[task_id] / float(self.task_num_iters[task_id]),
self.task_loss_tmp[task_id] / float(self.task_step_tmp[task_id]),
self.task_score_tmp[task_id] / float(self.task_step_tmp[task_id]),
self.task_norm_tmp[task_id] / float(self.task_step_tmp[task_id]),
)
)
logger.info(lossInfo)
print(lossInfo, file=self.txt_f)
self.task_step_tmp = {task_id: 0 for task_id in self.task_ids}
self.task_loss_tmp = {task_id: 0 for task_id in self.task_ids}
self.task_score_tmp = {task_id: 0 for task_id in self.task_ids}
self.task_norm_tmp = {task_id: 0 for task_id in self.task_ids}
def showLossValCC(self):
lossInfo = "Validation "
for task_id in self.task_ids:
masked_t_loss_val = self.masked_t_loss_val[task_id] / float(self.task_step_val[task_id])
masked_v_loss_val = self.masked_v_loss_val[task_id] / float(self.task_step_val[task_id])
next_sentense_loss_val = self.next_sentense_loss_val[task_id] / float(self.task_step_val[task_id])
lossInfo += "[%s]: masked_t %.3f masked_v %.3f NSP %.3f" % (
self.task_id2name[task_id],
masked_t_loss_val,
masked_v_loss_val,
next_sentense_loss_val,
)
self.linePlot(self.epochId, masked_t_loss_val, "val", self.task_id2name[task_id] + "_mask_t")
            self.linePlot(self.epochId, masked_v_loss_val, "val", self.task_id2name[task_id] + "_mask_v")
self.linePlot(self.epochId, next_sentense_loss_val, "val", self.task_id2name[task_id] + "_nsp")
self.masked_t_loss_val = {task_id: 0 for task_id in self.masked_t_loss_val}
self.masked_v_loss_val = {task_id: 0 for task_id in self.masked_v_loss_val}
self.next_sentense_loss_val = {task_id: 0 for task_id in self.next_sentense_loss_val}
self.task_datasize_val = {task_id: 0 for task_id in self.task_datasize_val}
self.task_step_val = {task_id: 0 for task_id in self.task_ids}
logger.info(lossInfo)
print(lossInfo, file=self.txt_f)
def showLossTrainCC(self):
# show the current loss, once showed, reset the loss.
lossInfo = ""
for task_id in self.task_ids:
if self.task_num_iters[task_id] > 0:
if self.task_step_tmp[task_id]:
lossInfo += (
"[%s]: iter %d Ep: %.2f masked_t %.3f masked_v %.3f NSP %.3f lr %.6g"
% (
self.task_id2name[task_id], self.task_step[task_id],
self.task_step[task_id] / float(self.task_num_iters[task_id]),
self.masked_t_loss[task_id] / float(self.task_step_tmp[task_id]),
self.masked_v_loss[task_id] / float(self.task_step_tmp[task_id]),
self.next_sentense_loss[task_id] / float(self.task_step_tmp[task_id]),
self.task_norm_tmp[task_id] / float(self.task_step_tmp[task_id]),
)
)
logger.info(lossInfo)
print(lossInfo, file=self.txt_f)
self.task_step_tmp = {task_id: 0 for task_id in self.task_ids}
self.masked_t_loss = {task_id: 0 for task_id in self.task_ids}
self.masked_v_loss = {task_id: 0 for task_id in self.task_ids}
self.next_sentense_loss = {task_id: 0 for task_id in self.task_ids}
self.task_norm_tmp = {task_id: 0 for task_id in self.task_ids}
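# Illustrative usage sketch for tbLogger (names and paths here are hypothetical, not
# taken from the original training scripts):
#
#     tb_logger = tbLogger(log_dir="logs/tb", txt_dir="logs", task_names=["VQA"],
#                          task_ids=["TASK0"], task_num_iters={"TASK0": 1000},
#                          gradient_accumulation_steps=1)
#     tb_logger.step_train(epochId=0, stepId=10, loss=0.7, score=0.5, norm=1.2,
#                          task_id="TASK0", split="train")
#     tb_logger.showLossTrain()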
def freeze_layers(model):
fixed_layers = set(model.config.fixed_layers) # e.g. "bert.embeddings", "bert.v_embeddings.LayerNorm", "bert.encoder.layer.15.output.v_dense"
for key, value in dict(model.named_parameters()).items():
for name in fixed_layers:
if key.startswith(name):
value.requires_grad = False
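# freeze_layers expects model.config.fixed_layers to hold parameter-name prefixes; every
# parameter whose name starts with one of them stops receiving gradients. For example
# (prefixes below are illustrative), fixed_layers = ["bert.embeddings"] would freeze the
# embedding weights while leaving the encoder trainable.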
def print_and_log(string, logger=None):
if logger is None:
print(string)
else:
logger.info(string)
def summary_parameters(model, logger=None):
"""
Summary Parameters of Model
:param model: torch.nn.module_name
:param logger: logger
:return: None
"""
print_and_log('>> Trainable Parameters:', logger)
    trainable_parameters = [(str(n), str(v.dtype), str(tuple(v.shape)), str(v.numel()))
                            for n, v in model.named_parameters() if v.requires_grad]
    max_lens = [max([len(item) + 4 for item in col]) for col in zip(*trainable_parameters)]
    raw_format = '|' + '|'.join(['{{:{}s}}'.format(max_len) for max_len in max_lens]) + '|'
    raw_split = '-' * (sum(max_lens) + len(max_lens) + 1)
    print_and_log(raw_split, logger)
    print_and_log(raw_format.format('Name', 'Dtype', 'Shape', '#Params'), logger)
    print_and_log(raw_split, logger)
    for name, dtype, shape, number in trainable_parameters:
print_and_log(raw_format.format(name, dtype, shape, number), logger)
print_and_log(raw_split, logger)
num_trainable_params = sum([v.numel() for v in model.parameters() if v.requires_grad])
total_params = sum([v.numel() for v in model.parameters()])
non_trainable_params = total_params - num_trainable_params
print_and_log('>> {:25s}\t{:.2f}\tM'.format('# TrainableParams:', num_trainable_params / (1.0 * 10 ** 6)), logger)
print_and_log('>> {:25s}\t{:.2f}\tM'.format('# NonTrainableParams:', non_trainable_params / (1.0 * 10 ** 6)), logger)
print_and_log('>> {:25s}\t{:.2f}\tM'.format('# TotalParams:', total_params / (1.0 * 10 ** 6)), logger)
def save(path, logger, epoch_id, model, optimizer, scheduler, global_step, tb_logger, default_gpu, score=None):
if default_gpu:
# Save a trained model
logger.info("** ** * Saving model * ** ** ")
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
output_model_file = os.path.join(path, "pytorch_model_" + str(epoch_id) + ".bin")
torch.save(model_to_save.state_dict(), output_model_file)
if score is not None:
output_model_file = os.path.join(path, "pytorch_model_best.bin")
torch.save(model_to_save.state_dict(), output_model_file)
output_checkpoint = os.path.join(path, "pytorch_ckpt_latest.tar")
torch.save(
{"model_state_dict": model_to_save.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"global_step": global_step,
"epoch_id": epoch_id,
"tb_logger": tb_logger,
"score": score,
},
output_checkpoint,
)
def resume(path, model, optimizer, scheduler, tb_logger):
start_iter_id = 0
global_step = 0
start_epoch = 0
best_score = float("-inf")
if path != "" and os.path.exists(path):
checkpoint = torch.load(path, map_location="cpu")
new_dict = {}
for attr in checkpoint["model_state_dict"]:
if attr.startswith("module."):
new_dict[attr.replace("module.", "", 1)] = checkpoint["model_state_dict"][attr]
else:
new_dict[attr] = checkpoint["model_state_dict"][attr]
model.load_state_dict(new_dict)
scheduler.load_state_dict(checkpoint.get("scheduler_state_dict")) #, checkpoint["warmup_scheduler_state_dict"]))
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
global_step = checkpoint["global_step"]
start_epoch = int(checkpoint["epoch_id"]) + 1
tb_logger = checkpoint["tb_logger"]
best_score = checkpoint.get("score", float("-inf"))
del checkpoint
return start_iter_id, global_step, start_epoch, tb_logger, best_score
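# Typical pairing (illustrative): save() above writes pytorch_model_<epoch>.bin plus
# pytorch_ckpt_latest.tar, and resume("<output_dir>/pytorch_ckpt_latest.tar", model,
# optimizer, scheduler, tb_logger) restores the weights, optimizer/scheduler state,
# global step, next epoch and best score ("<output_dir>" is a placeholder).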
| avg_line_length: 46.961877 | max_line_length: 146 | alphanum_fraction: 0.642625 |
| hexsha: 669b85a63f2892044ea8c40315bca4a8f053453c | size: 4,661 | ext: py | lang: Python |
| max_stars: tests/test_mbgdml_predict.py @ keithgroup/mbGDML (a68b2a41c26c8e7e8e2f4527939c4564402f36bc) | licenses: ["MIT"] | stars: 6 | 2020-08-03T03:44:33.000Z to 2022-02-24T21:50:03.000Z |
| max_issues: tests/test_mbgdml_predict.py @ keithgroup/mbGDML (a68b2a41c26c8e7e8e2f4527939c4564402f36bc) | licenses: ["MIT"] | issues: null | dates: null |
| max_forks: tests/test_mbgdml_predict.py @ keithgroup/mbGDML (a68b2a41c26c8e7e8e2f4527939c4564402f36bc) | licenses: ["MIT"] | forks: 1 | 2022-02-25T03:03:40.000Z to 2022-02-25T03:03:40.000Z |
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2020-2021, Alex M. Maldonado
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tests for `mbgdml` package."""
import pytest
import numpy as np
import mbgdml.data as data
from mbgdml.predict import mbPredict
dset_dir = './tests/data/datasets'
model_dir = './tests/data/models'
molecule_sizes = {
'h2o': 3,
'mecn': 6,
'meoh': 6
}
def test_predict_single_16mer():
"""
"""
dset_16h2o_path = f'{dset_dir}/16h2o/16h2o.yoo.etal.boat.b-dset-mp2.def2tzvp.npz'
model_h2o_paths = [
f'{model_dir}/140h2o.sphere.gfn2.md.500k.prod1.3h2o.dset.1h2o-model-train500.npz',
f'{model_dir}/140h2o.sphere.gfn2.md.500k.prod1.3h2o.dset.2h2o.cm.6-model.mb-train500.npz',
f'{model_dir}/140h2o.sphere.gfn2.md.500k.prod1.3h2o-model.mb-train500.npz',
]
dset_16h2o = data.dataSet(dset_16h2o_path)
predict = mbPredict(model_h2o_paths)
E_predict, F_predict = predict.predict(
dset_16h2o.z, dset_16h2o.R, dset_16h2o.entity_ids, dset_16h2o.comp_ids,
ignore_criteria=False
)
E = np.array([-766368.03399751])
F = np.array([
[[ 0.29906572, 0.14785963, 0.24781407],
[-0.30412644, -0.72411633, -0.11358761],
[-0.49192677, 0.86896897, -0.67525678],
[ 0.36627638, 1.02869105, -2.56223656],
[-0.10503164, -0.89234795, 0.9294424,],
[-0.1841222, -0.14389019, 1.2193703,],
[-1.38995634, 1.74512784, 0.20352509],
[ 0.50352734, -1.84912139, -1.11214437],
[-0.45073645, -0.58830104, -0.0708215,],
[-0.05824096, -0.07168296, 3.05363522],
[-0.21573588, 0.55601679, -0.93232724],
[ 0.33556773, 0.3464968, -1.20999654],
[ 1.13396357, 0.64719014, -0.37314183],
[-0.14864126, -0.74782087, 0.92789942],
[ 0.25446292, 0.18875155, 0.35677525],
[ 1.18808078, 0.9989521, -1.70936528],
[-0.42772192, -0.23482216, 2.22942188],
[ 0.5023115, -0.2546999, 0.59431561],
[ 1.03039212, -0.27777061, 0.43893643],
[-1.6481248, -0.11736926, 0.39427926],
[-0.8270073, -1.08703941, -0.46220551],
[-1.65290086, -0.85447434, -0.25093955],
[ 2.38457849, -0.51709509, -0.97800052],
[ 0.70822521, 0.11395345, 1.4606325,],
[-0.49915379, 2.60146319, 1.20100891],
[-0.01957611, -1.61507913, -0.3507438,],
[-0.04340775, -0.95576235, -0.88557194],
[-0.1068999, -1.47361438, -0.57488098],
[ 0.10196448, 1.2622373, -0.57288566],
[ 0.46155007, 0.86992573, -0.07612512],
[-0.06659418, -1.53956909, -2.77945064],
[-0.30081568, 0.14797997, 0.90844867],
[ 0.38111199, 1.29149786, 0.63063523],
[ 0.27202453, 0.04869613, -1.44668878],
[ 0.03618388, -0.62330206, -1.39043361],
[-0.5954522, 0.61790128, 1.67910304],
[ 0.10622445, 0.31818432, 0.72714358],
[-0.48496294, 0.85814888, -0.29055761],
[-0.85844605, 0.18657187, -0.07795668],
[ 2.58353778, -0.54173036, 0.4635027,],
[-1.56162087, 0.12760808, 0.02244887],
[-0.65542649, 0.34366634, 0.19180049],
[-2.35675996, -1.09049215, 0.22829278],
[ 0.71868199, 0.072091, -0.36158273],
[ 1.55157057, 0.37661812, -0.25918432],
[-1.39910186, -0.24662851, 2.7263307,],
[ 1.55454091, 0.60506067, -1.08736517],
[ 0.3786482, 0.07707048, -0.23131207]]
])
assert np.allclose(E_predict, E)
assert np.allclose(F_predict, F, rtol=1e-04, atol=1e-02)
| avg_line_length: 42.761468 | max_line_length: 98 | alphanum_fraction: 0.625617 |
| hexsha: 1a13ff100a6ba7f2c407d2a7cf0ac2a442f0cf67 | size: 17,600 | ext: py | lang: Python |
| max_stars: librosa/core/notation.py @ jyun25/librosa (a297cac125175afe57fccbf5eecb65b79d088181) | licenses: ["0BSD"] | stars: 4,795 | 2016-05-12T04:39:33.000Z to 2022-03-30T21:34:30.000Z |
| max_issues: librosa/core/notation.py @ jyun25/librosa (a297cac125175afe57fccbf5eecb65b79d088181) | licenses: ["0BSD"] | issues: 1,110 | 2016-05-12T16:56:48.000Z to 2022-03-31T19:26:42.000Z |
| max_forks: librosa/core/notation.py @ jyun25/librosa (a297cac125175afe57fccbf5eecb65b79d088181) | licenses: ["0BSD"] | forks: 919 | 2016-05-12T09:17:06.000Z to 2022-03-27T07:09:19.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Music notation utilities"""
import re
import numpy as np
from .._cache import cache
from ..util.exceptions import ParameterError
__all__ = [
"key_to_degrees",
"key_to_notes",
"mela_to_degrees",
"mela_to_svara",
"thaat_to_degrees",
"list_mela",
"list_thaat",
]
THAAT_MAP = dict(
bilaval=[0, 2, 4, 5, 7, 9, 11],
khamaj=[0, 2, 4, 5, 7, 9, 10],
kafi=[0, 2, 3, 5, 7, 9, 10],
asavari=[0, 2, 3, 5, 7, 8, 10],
bhairavi=[0, 1, 3, 5, 7, 8, 10],
kalyan=[0, 2, 4, 6, 7, 9, 11],
marva=[0, 1, 4, 6, 7, 9, 11],
poorvi=[0, 1, 4, 6, 7, 8, 11],
todi=[0, 1, 3, 6, 7, 8, 11],
bhairav=[0, 1, 4, 5, 7, 8, 11],
)
# Enumeration will start from 1
MELAKARTA_MAP = {
k: i
for i, k in enumerate(
[
"kanakangi",
"ratnangi",
"ganamurthi",
"vanaspathi",
"manavathi",
"tanarupi",
"senavathi",
"hanumathodi",
"dhenuka",
"natakapriya",
"kokilapriya",
"rupavathi",
"gayakapriya",
"vakulabharanam",
"mayamalavagaula",
"chakravakom",
"suryakantham",
"hatakambari",
"jhankaradhwani",
"natabhairavi",
"keeravani",
"kharaharapriya",
"gaurimanohari",
"varunapriya",
"mararanjini",
"charukesi",
"sarasangi",
"harikambhoji",
"dheerasankarabharanam",
"naganandini",
"yagapriya",
"ragavardhini",
"gangeyabhushani",
"vagadheeswari",
"sulini",
"chalanatta",
"salagam",
"jalarnavam",
"jhalavarali",
"navaneetham",
"pavani",
"raghupriya",
"gavambodhi",
"bhavapriya",
"subhapanthuvarali",
"shadvidhamargini",
"suvarnangi",
"divyamani",
"dhavalambari",
"namanarayani",
"kamavardhini",
"ramapriya",
"gamanasrama",
"viswambhari",
"syamalangi",
"shanmukhapriya",
"simhendramadhyamam",
"hemavathi",
"dharmavathi",
"neethimathi",
"kanthamani",
"rishabhapriya",
"latangi",
"vachaspathi",
"mechakalyani",
"chitrambari",
"sucharitra",
"jyotisvarupini",
"dhatuvardhini",
"nasikabhushani",
"kosalam",
"rasikapriya",
],
1,
)
}
def thaat_to_degrees(thaat):
"""Construct the svara indices (degrees) for a given thaat
Parameters
----------
thaat : str
The name of the thaat
Returns
-------
indices : np.ndarray
A list of the seven svara indices (starting from 0=Sa)
contained in the specified thaat
See Also
--------
key_to_degrees
mela_to_degrees
list_thaat
Examples
--------
>>> librosa.thaat_to_degrees('bilaval')
array([ 0, 2, 4, 5, 7, 9, 11])
>>> librosa.thaat_to_degrees('todi')
array([ 0, 1, 3, 6, 7, 8, 11])
"""
return np.asarray(THAAT_MAP[thaat.lower()])
def mela_to_degrees(mela):
"""Construct the svara indices (degrees) for a given melakarta raga
Parameters
----------
mela : str or int
Either the name or integer index ([1, 2, ..., 72]) of the melakarta raga
Returns
-------
degrees : np.ndarray
A list of the seven svara indices (starting from 0=Sa)
contained in the specified raga
See Also
--------
thaat_to_degrees
key_to_degrees
list_mela
Examples
--------
Melakarta #1 (kanakangi):
>>> librosa.mela_to_degrees(1)
array([0, 1, 2, 5, 7, 8, 9])
Or using a name directly:
>>> librosa.mela_to_degrees('kanakangi')
array([0, 1, 2, 5, 7, 8, 9])
"""
if isinstance(mela, str):
index = MELAKARTA_MAP[mela.lower()] - 1
elif 0 < mela <= 72:
index = mela - 1
else:
raise ParameterError("mela={} must be in range [1, 72]".format(mela))
# always have Sa [0]
degrees = [0]
# Fill in Ri and Ga
lower = index % 36
if 0 <= lower < 6:
# Ri1, Ga1
degrees.extend([1, 2])
elif 6 <= lower < 12:
# Ri1, Ga2
degrees.extend([1, 3])
elif 12 <= lower < 18:
# Ri1, Ga3
degrees.extend([1, 4])
elif 18 <= lower < 24:
# Ri2, Ga2
degrees.extend([2, 3])
elif 24 <= lower < 30:
# Ri2, Ga3
degrees.extend([2, 4])
else:
# Ri3, Ga3
degrees.extend([3, 4])
# Determine Ma
if index < 36:
# Ma1
degrees.append(5)
else:
# Ma2
degrees.append(6)
# always have Pa [7]
degrees.append(7)
# Determine Dha and Ni
upper = index % 6
if upper == 0:
# Dha1, Ni1
degrees.extend([8, 9])
elif upper == 1:
# Dha1, Ni2
degrees.extend([8, 10])
elif upper == 2:
# Dha1, Ni3
degrees.extend([8, 11])
elif upper == 3:
# Dha2, Ni2
degrees.extend([9, 10])
elif upper == 4:
# Dha2, Ni3
degrees.extend([9, 11])
else:
# Dha3, Ni3
degrees.extend([10, 11])
return np.array(degrees)
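# Worked example: mela 29 (dheerasankarabharanam) has index 28, so lower = 28 % 36 = 28
# -> Ri2/Ga3 -> [2, 4]; index < 36 -> Ma1 -> [5]; Pa -> [7]; upper = 28 % 6 = 4 ->
# Dha2/Ni3 -> [9, 11]; giving [0, 2, 4, 5, 7, 9, 11], i.e. the major scale.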
@cache(level=10)
def mela_to_svara(mela, abbr=True, unicode=True):
"""Spell the Carnatic svara names for a given melakarta raga
This function exists to resolve enharmonic equivalences between
pitch classes:
- Ri2 / Ga1
- Ri3 / Ga2
- Dha2 / Ni1
- Dha3 / Ni2
For svara outside the raga, names are chosen to preserve orderings
so that all Ri precede all Ga, and all Dha precede all Ni.
Parameters
----------
mela : str or int
the name or numerical index of the melakarta raga
abbr : bool
If `True`, use single-letter svara names: S, R, G, ...
If `False`, use full names: Sa, Ri, Ga, ...
unicode : bool
If `True`, use unicode symbols for numberings, e.g., Ri\u2081
If `False`, use low-order ASCII, e.g., Ri1.
Returns
-------
svara : list of strings
The svara names for each of the 12 pitch classes.
See Also
--------
key_to_notes
mela_to_degrees
list_mela
Examples
--------
Melakarta #1 (Kanakangi) uses R1, G1, D1, N1
>>> librosa.mela_to_svara(1)
['S', 'R₁', 'G₁', 'G₂', 'G₃', 'M₁', 'M₂', 'P', 'D₁', 'N₁', 'N₂', 'N₃']
#19 (Jhankaradhwani) uses R2 and G2 so the third svara are Ri:
>>> librosa.mela_to_svara(19)
['S', 'R₁', 'R₂', 'G₂', 'G₃', 'M₁', 'M₂', 'P', 'D₁', 'N₁', 'N₂', 'N₃']
#31 (Yagapriya) uses R3 and G3, so third and fourth svara are Ri:
>>> librosa.mela_to_svara(31)
['S', 'R₁', 'R₂', 'R₃', 'G₃', 'M₁', 'M₂', 'P', 'D₁', 'N₁', 'N₂', 'N₃']
#34 (Vagadheeswari) uses D2 and N2, so Ni1 becomes Dha2:
>>> librosa.mela_to_svara(34)
['S', 'R₁', 'R₂', 'R₃', 'G₃', 'M₁', 'M₂', 'P', 'D₁', 'D₂', 'N₂', 'N₃']
#36 (Chalanatta) uses D3 and N3, so Ni2 becomes Dha3:
>>> librosa.mela_to_svara(36)
['S', 'R₁', 'R₂', 'R₃', 'G₃', 'M₁', 'M₂', 'P', 'D₁', 'D₂', 'D₃', 'N₃']
# You can also query by raga name instead of index:
>>> librosa.mela_to_svara('chalanatta')
['S', 'R₁', 'R₂', 'R₃', 'G₃', 'M₁', 'M₂', 'P', 'D₁', 'D₂', 'D₃', 'N₃']
"""
# The following will be constant for all ragas
svara_map = [
"Sa",
"Ri\u2081",
None, # Ri2/Ga1
None, # Ri3/Ga2
"Ga\u2083",
"Ma\u2081",
"Ma\u2082",
"Pa",
"Dha\u2081",
None, # Dha2/Ni1
None, # Dha3/Ni2
"Ni\u2083",
]
if isinstance(mela, str):
mela_idx = MELAKARTA_MAP[mela.lower()] - 1
elif 0 < mela <= 72:
mela_idx = mela - 1
else:
raise ParameterError("mela={} must be in range [1, 72]".format(mela))
# Determine Ri2/Ga1
lower = mela_idx % 36
if lower < 6:
# First six will have Ri1/Ga1
svara_map[2] = "Ga\u2081"
else:
# All others have either Ga2/Ga3
# So we'll call this Ri2
svara_map[2] = "Ri\u2082"
# Determine Ri3/Ga2
if lower < 30:
# First thirty should get Ga2
svara_map[3] = "Ga\u2082"
else:
# Only the last six have Ri3
svara_map[3] = "Ri\u2083"
upper = mela_idx % 6
# Determine Dha2/Ni1
if upper == 0:
# these are the only ones with Ni1
svara_map[9] = "Ni\u2081"
else:
# Everyone else has Dha2
svara_map[9] = "Dha\u2082"
# Determine Dha3/Ni2
if upper == 5:
# This one has Dha3
svara_map[10] = "Dha\u2083"
else:
# Everyone else has Ni2
svara_map[10] = "Ni\u2082"
if abbr:
svara_map = [
s.translate(str.maketrans({"a": "", "h": "", "i": ""})) for s in svara_map
]
if not unicode:
svara_map = [
s.translate(str.maketrans({"\u2081": "1", "\u2082": "2", "\u2083": "3"}))
for s in svara_map
]
return list(svara_map)
def list_mela():
"""List melakarta ragas by name and index.
Melakarta raga names are transcribed from [#]_, with the exception of #45
(subhapanthuvarali).
.. [#] Bhagyalekshmy, S. (1990).
Ragas in Carnatic music.
South Asia Books.
Returns
-------
mela_map : dict
A dictionary mapping melakarta raga names to indices (1, 2, ..., 72)
Examples
--------
>>> librosa.list_mela()
{'kanakangi': 1,
'ratnangi': 2,
'ganamurthi': 3,
'vanaspathi': 4,
...}
See Also
--------
mela_to_degrees
mela_to_svara
list_thaat
"""
return MELAKARTA_MAP.copy()
def list_thaat():
"""List supported thaats by name.
Returns
-------
thaats : list
A list of supported thaats
Examples
--------
>>> librosa.list_thaat()
['bilaval',
'khamaj',
'kafi',
'asavari',
'bhairavi',
'kalyan',
'marva',
'poorvi',
'todi',
'bhairav']
See Also
--------
list_mela
thaat_to_degrees
"""
return list(THAAT_MAP.keys())
@cache(level=10)
def key_to_notes(key, unicode=True):
"""Lists all 12 note names in the chromatic scale, as spelled according to
a given key (major or minor).
This function exists to resolve enharmonic equivalences between different
spellings for the same pitch (e.g. C♯ vs D♭), and is primarily useful when producing
human-readable outputs (e.g. plotting) for pitch content.
Note names are decided by the following rules:
1. If the tonic of the key has an accidental (sharp or flat), that accidental will be
used consistently for all notes.
2. If the tonic does not have an accidental, accidentals will be inferred to minimize
the total number used for diatonic scale degrees.
3. If there is a tie (e.g., in the case of C:maj vs A:min), sharps will be preferred.
Parameters
----------
key : string
Must be in the form TONIC:key. Tonic must be upper case (``CDEFGAB``),
key must be lower-case (``maj`` or ``min``).
Single accidentals (``b!♭`` for flat, or ``#♯`` for sharp) are supported.
Examples: ``C:maj, Db:min, A♭:min``.
unicode: bool
        If ``True`` (default), use Unicode symbols (♯𝄪♭𝄫) for accidentals.
If ``False``, Unicode symbols will be mapped to low-order ASCII representations::
♯ -> #, 𝄪 -> ##, ♭ -> b, 𝄫 -> bb
Returns
-------
notes : list
``notes[k]`` is the name for semitone ``k`` (starting from C)
under the given key. All chromatic notes (0 through 11) are
included.
See Also
--------
midi_to_note
Examples
--------
`C:maj` will use all sharps
>>> librosa.key_to_notes('C:maj')
['C', 'C♯', 'D', 'D♯', 'E', 'F', 'F♯', 'G', 'G♯', 'A', 'A♯', 'B']
`A:min` has the same notes
>>> librosa.key_to_notes('A:min')
['C', 'C♯', 'D', 'D♯', 'E', 'F', 'F♯', 'G', 'G♯', 'A', 'A♯', 'B']
`A♯:min` will use sharps, but spell note 0 (`C`) as `B♯`
>>> librosa.key_to_notes('A#:min')
['B♯', 'C♯', 'D', 'D♯', 'E', 'E♯', 'F♯', 'G', 'G♯', 'A', 'A♯', 'B']
`G♯:maj` will use a double-sharp to spell note 7 (`G`) as `F𝄪`:
>>> librosa.key_to_notes('G#:maj')
['B♯', 'C♯', 'D', 'D♯', 'E', 'E♯', 'F♯', 'F𝄪', 'G♯', 'A', 'A♯', 'B']
`F♭:min` will use double-flats
>>> librosa.key_to_notes('Fb:min')
['D𝄫', 'D♭', 'E𝄫', 'E♭', 'F♭', 'F', 'G♭', 'A𝄫', 'A♭', 'B𝄫', 'B♭', 'C♭']
"""
# Parse the key signature
match = re.match(
r"^(?P<tonic>[A-Ga-g])"
r"(?P<accidental>[#♯b!♭]?)"
r":(?P<scale>(maj|min)(or)?)$",
key,
)
if not match:
raise ParameterError("Improper key format: {:s}".format(key))
pitch_map = {"C": 0, "D": 2, "E": 4, "F": 5, "G": 7, "A": 9, "B": 11}
acc_map = {"#": 1, "": 0, "b": -1, "!": -1, "♯": 1, "♭": -1}
tonic = match.group("tonic").upper()
accidental = match.group("accidental")
offset = acc_map[accidental]
scale = match.group("scale")[:3].lower()
# Determine major or minor
major = scale == "maj"
# calculate how many clockwise steps we are on CoF (== # sharps)
if major:
tonic_number = ((pitch_map[tonic] + offset) * 7) % 12
else:
tonic_number = ((pitch_map[tonic] + offset) * 7 + 9) % 12
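    # Moving up a perfect fifth (7 semitones) adds one sharp to the key signature, so
    # multiplying the tonic pitch class by 7 (mod 12) counts the sharps directly; values
    # above 6 are handled below as (12 - n) flats instead. The extra "+ 9" for minor keys
    # shifts to the relative major, which lies 3 semitones up (3 * 7 = 21 = 9 mod 12).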
# Decide if using flats or sharps
# Logic here is as follows:
# 1. respect the given notation for the tonic.
# Sharp tonics will always use sharps, likewise flats.
# 2. If no accidental in the tonic, try to minimize accidentals.
# 3. If there's a tie for accidentals, use sharp for major and flat for minor.
if offset < 0:
# use flats explicitly
use_sharps = False
elif offset > 0:
# use sharps explicitly
use_sharps = True
elif 0 <= tonic_number < 6:
use_sharps = True
elif tonic_number > 6:
use_sharps = False
# Basic note sequences for simple keys
notes_sharp = ["C", "C♯", "D", "D♯", "E", "F", "F♯", "G", "G♯", "A", "A♯", "B"]
notes_flat = ["C", "D♭", "D", "E♭", "E", "F", "G♭", "G", "A♭", "A", "B♭", "B"]
# These apply when we have >= 6 sharps
sharp_corrections = [
(5, "E♯"),
(0, "B♯"),
(7, "F𝄪"),
(2, "C𝄪"),
(9, "G𝄪"),
(4, "D𝄪"),
(11, "A𝄪"),
]
# These apply when we have >= 6 flats
flat_corrections = [
(11, "C♭"),
(4, "F♭"),
(9, "B𝄫"),
(2, "E𝄫"),
(7, "A𝄫"),
(0, "D𝄫"),
] # last would be (5, 'G𝄫')
# Apply a mod-12 correction to distinguish B#:maj from C:maj
n_sharps = tonic_number
if tonic_number == 0 and tonic == "B":
n_sharps = 12
if use_sharps:
# This will only execute if n_sharps >= 6
for n in range(0, n_sharps - 6 + 1):
index, name = sharp_corrections[n]
notes_sharp[index] = name
notes = notes_sharp
else:
n_flats = (12 - tonic_number) % 12
# This will only execute if tonic_number <= 6
for n in range(0, n_flats - 6 + 1):
index, name = flat_corrections[n]
notes_flat[index] = name
notes = notes_flat
# Finally, apply any unicode down-translation if necessary
if not unicode:
translations = str.maketrans({"♯": "#", "𝄪": "##", "♭": "b", "𝄫": "bb"})
notes = list(n.translate(translations) for n in notes)
return notes
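# Worked example: for key='D:maj' the tonic D has pitch class 2 and no accidental, so
# tonic_number = (2 * 7) % 12 = 2 sharps; use_sharps is True and, with fewer than six
# sharps, no spelling corrections apply, so the plain sharp spelling
# ['C', 'C♯', 'D', 'D♯', 'E', 'F', 'F♯', 'G', 'G♯', 'A', 'A♯', 'B'] is returned.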
def key_to_degrees(key):
"""Construct the diatonic scale degrees for a given key.
Parameters
----------
key : str
Must be in the form TONIC:key. Tonic must be upper case (``CDEFGAB``),
key must be lower-case (``maj`` or ``min``).
Single accidentals (``b!♭`` for flat, or ``#♯`` for sharp) are supported.
Examples: ``C:maj, Db:min, A♭:min``.
Returns
-------
degrees : np.ndarray
An array containing the semitone numbers (0=C, 1=C#, ... 11=B)
for each of the seven scale degrees in the given key, starting
from the tonic.
See Also
--------
key_to_notes
Examples
--------
>>> librosa.key_to_degrees('C:maj')
array([ 0, 2, 4, 5, 7, 9, 11])
>>> librosa.key_to_degrees('C#:maj')
array([ 1, 3, 5, 6, 8, 10, 0])
>>> librosa.key_to_degrees('A:min')
array([ 9, 11, 0, 2, 4, 5, 7])
"""
notes = dict(
maj=np.array([0, 2, 4, 5, 7, 9, 11]), min=np.array([0, 2, 3, 5, 7, 8, 10])
)
match = re.match(
r"^(?P<tonic>[A-Ga-g])"
r"(?P<accidental>[#♯b!♭]?)"
r":(?P<scale>(maj|min)(or)?)$",
key,
)
if not match:
raise ParameterError("Improper key format: {:s}".format(key))
pitch_map = {"C": 0, "D": 2, "E": 4, "F": 5, "G": 7, "A": 9, "B": 11}
acc_map = {"#": 1, "": 0, "b": -1, "!": -1, "♯": 1, "♭": -1}
tonic = match.group("tonic").upper()
accidental = match.group("accidental")
offset = acc_map[accidental]
scale = match.group("scale")[:3].lower()
return (notes[scale] + pitch_map[tonic] + offset) % 12
| avg_line_length: 25.360231 | max_line_length: 89 | alphanum_fraction: 0.509545 |
| hexsha: 314da09607550dc6e8552b044bfc1ac0aed88356 | size: 259 | ext: py | lang: Python |
| max_stars: accounting_app/accounting_app/doctype/purchase_invoice/purchase_invoice.py @ nidhipurohit11/Accounting-App (e35dfef3720efab92e72ade91d9ef457f02ea4fe) | licenses: ["MIT"] | stars: 2 | 2021-06-02T08:01:59.000Z to 2021-12-21T04:56:27.000Z |
| max_issues: accounting_app/accounting_app/doctype/purchase_invoice/purchase_invoice.py @ nidhipurohit11/Accounting-App (e35dfef3720efab92e72ade91d9ef457f02ea4fe) | licenses: ["MIT"] | issues: null | dates: null |
| max_forks: accounting_app/accounting_app/doctype/purchase_invoice/purchase_invoice.py @ nidhipurohit11/Accounting-App (e35dfef3720efab92e72ade91d9ef457f02ea4fe) | licenses: ["MIT"] | forks: 2 | 2021-06-02T08:01:59.000Z to 2021-08-23T10:06:30.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Nidhi and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class PurchaseInvoice(Document):
pass
| avg_line_length: 23.545455 | max_line_length: 49 | alphanum_fraction: 0.776062 |
| hexsha: 3b077bdea13188917c4591a6aed2b68fc01e9fc1 | size: 342 | ext: py | lang: Python |
| max_stars: opentelemetry-sdk/src/opentelemetry/sdk/_metrics/sdk_configuration.py @ thedrow/opentelemetry-python (a659966b950ed055f270e8700f9e39e36e5c4eda) | licenses: ["Apache-2.0"] | stars: null | dates: null |
| max_issues: opentelemetry-sdk/src/opentelemetry/sdk/_metrics/sdk_configuration.py @ thedrow/opentelemetry-python (a659966b950ed055f270e8700f9e39e36e5c4eda) | licenses: ["Apache-2.0"] | issues: null | dates: null |
| max_forks: opentelemetry-sdk/src/opentelemetry/sdk/_metrics/sdk_configuration.py @ thedrow/opentelemetry-python (a659966b950ed055f270e8700f9e39e36e5c4eda) | licenses: ["Apache-2.0"] | forks: null | dates: null |
from dataclasses import dataclass
from typing import Sequence
from opentelemetry.sdk._metrics.metric_reader import MetricReader
from opentelemetry.sdk.resources import Resource
@dataclass
class SdkConfiguration:
resource: Resource
# TODO: once views are added
# views: Sequence[View]
metric_readers: Sequence[MetricReader]
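# Minimal construction sketch (illustrative; an empty reader list and a default resource):
#
#     config = SdkConfiguration(resource=Resource.create({}), metric_readers=[])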
| avg_line_length: 24.428571 | max_line_length: 65 | alphanum_fraction: 0.80117 |
| hexsha: 099aa3c0ea5c2ee6be77e5b358081c3ec474ab4b | size: 9,055 | ext: py | lang: Python |
| max_stars: train_sentiment.py @ HLTCHKUST/sentiment-lookahead (1c076b7c5c31b0f7c454720377db4e733838ebb2) | licenses: ["MIT"] | stars: 13 | 2020-02-19T03:33:37.000Z to 2021-11-30T07:35:39.000Z |
| max_issues: train_sentiment.py @ HLTCHKUST/sentiment_lookahead (1c076b7c5c31b0f7c454720377db4e733838ebb2) | licenses: ["MIT"] | issues: 2 | 2020-10-14T01:59:55.000Z to 2021-05-03T12:18:07.000Z |
| max_forks: train_sentiment.py @ HLTCHKUST/sentiment_lookahead (1c076b7c5c31b0f7c454720377db4e733838ebb2) | licenses: ["MIT"] | forks: 1 | 2020-06-17T16:27:36.000Z to 2020-06-17T16:27:36.000Z |
import os
import math
import random
import numpy as np
from sklearn.metrics import f1_score
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_pretrained_bert.optimization import BertAdam
from utils import constant, masked_cross_entropy
from utils.bleu import moses_multi_bleu
from utils.utils import get_metrics, save_ckpt, load_ckpt, save_model, load_model
def train_trace(model, dataloaders):
train_dataloader, dev_dataloader, test_dataloader = dataloaders
if(constant.USE_CUDA): model.cuda()
if constant.use_binary:
criterion = nn.BCEWithLogitsLoss()
else:
criterion = nn.MSELoss()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
opt = BertAdam(optimizer_grouped_parameters,
lr=constant.lr,
warmup=0.01,
t_total=int(len(train_dataloader) * 5))
best_dev = 10000
best_test = 10000
patience = 3
for e in range(constant.epochs):
model.train()
loss_log = []
f1_log = []
pbar = tqdm(enumerate(train_dataloader),total=len(train_dataloader))
for _, batch in pbar:
input_ids, input_masks, segment_ids, traces = batch
logits = model((input_ids, segment_ids, input_masks)).squeeze()
if len(logits.shape) == 0:
logits = logits.unsqueeze(0)
loss = criterion(logits, traces)
loss.backward()
opt.step()
opt.zero_grad()
## logging
loss_log.append(loss.item())
if constant.use_binary:
preds = F.sigmoid(logits) > 0.5
golds = traces.cpu().numpy()
else:
preds = logits > 0.5
golds = (traces > 0.5).cpu().numpy()
f1 = f1_score(golds, preds.detach().cpu().numpy(), average='weighted')
f1_log.append(f1)
pbar.set_description("(Epoch {}) TRAIN LOSS:{:.4f} TRAIN F1:{:.4f}".format(e+1, np.mean(loss_log), np.mean(f1_log)))
## LOG
dev_loss, dev_f1 = eval_trace(model, dev_dataloader)
test_loss, test_f1 = eval_trace(model, test_dataloader)
print("(Epoch {}) DEV LOSS: {:.4f} DEV F1:{:.4f} TEST LOSS: {:.4f} TEST F1:{:.4f} ".format(e+1, dev_loss, dev_f1, test_loss, test_f1))
print("(Epoch {}) BEST DEV LOSS: {:.4f} BEST TEST LOSS: {:.4f}".format(e+1, best_dev, best_test))
if(dev_loss < best_dev):
best_dev = dev_loss
best_test = test_loss
patience = 3
path = 'trained/data-{}.task-trace.loss-{}'
save_model(model, 'loss', best_dev, path.format(constant.data, best_dev))
else:
patience -= 1
if(patience == 0): break
if(best_dev == 0.0): break
print("BEST SCORES - DEV LOSS: {:.4f}, TEST LOSS: {:.4f}".format(best_dev, best_test))
def eval_trace(model, dataloader):
model.eval()
if constant.use_binary:
criterion = nn.BCEWithLogitsLoss()
else:
criterion = nn.MSELoss()
loss_log = []
f1_log = []
with torch.no_grad():
for batch in dataloader:
input_ids, input_masks, segment_ids, traces = batch
logits = model((input_ids, segment_ids, input_masks)).squeeze()
if len(logits.shape) == 0:
logits = logits.unsqueeze(0)
loss = criterion(logits, traces)
loss_log.append(loss.item())
if constant.use_binary:
preds = F.sigmoid(logits) > 0.5
golds = traces.cpu().numpy()
else:
preds = logits > 0.5
golds = (traces > 0.5).cpu().numpy()
f1 = f1_score(golds, preds.detach().cpu().numpy(), average='weighted')
f1_log.append(f1)
return np.mean(loss_log), np.mean(f1_log)
def train_sentiment(model, dataloaders):
"""
Training loop
    Inputs:
        model: the model to be trained
        dataloaders: tuple of (train, dev, test) data loaders
    Output:
        None (prints the best dev/test F1 scores and saves the best checkpoint to disk)
"""
train_dataloader, dev_dataloader, test_dataloader = dataloaders
if(constant.USE_CUDA): model.cuda()
criterion = nn.BCEWithLogitsLoss()
if constant.use_bert:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
opt = BertAdam(optimizer_grouped_parameters,
lr=constant.lr,
warmup=0.01,
t_total=int(len(train_dataloader) * 5))
else:
opt = torch.optim.Adam(model.parameters(), lr=constant.lr)
best_dev = 0
best_test = 0
patience = 3
try:
for e in range(constant.epochs):
model.train()
loss_log = []
f1_log = []
            if constant.grid_search:
                pbar = enumerate(train_dataloader)
            else:
                pbar = tqdm(enumerate(train_dataloader), total=len(train_dataloader))
for _, batch in pbar:
if constant.use_bert:
input_ids, input_masks, segment_ids, sentiments = batch
logits = model((input_ids, segment_ids, input_masks)).squeeze()
else:
sentences, lens, sentiments = batch
logits = model(sentences, lens).squeeze()
if len(logits.shape) == 0:
logits = logits.unsqueeze(0)
loss = criterion(logits, sentiments)
loss.backward()
opt.step()
opt.zero_grad()
## logging
loss_log.append(loss.item())
preds = F.sigmoid(logits) > 0.5
# preds = torch.argmax(logits, dim=1)
f1 = f1_score(sentiments.cpu().numpy(), preds.detach().cpu().numpy(), average='weighted')
f1_log.append(f1)
if not constant.grid_search:
pbar.set_description("(Epoch {}) TRAIN F1:{:.4f} TRAIN LOSS:{:.4f}".format(e+1, np.mean(f1_log), np.mean(loss_log)))
## LOG
f1 = eval_sentiment(model, dev_dataloader)
testF1 = eval_sentiment(model, test_dataloader)
print("(Epoch {}) DEV F1: {:.4f} TEST F1: {:.4f}".format(e+1, f1, testF1))
print("(Epoch {}) BEST DEV F1: {:.4f} BEST TEST F1: {:.4f}".format(e+1, best_dev, best_test))
if(f1 > best_dev):
best_dev = f1
best_test = testF1
patience = 3
path = 'trained/data-{}.task-sentiment.f1-{}'
save_model(model, 'loss', best_dev, path.format(constant.data, best_dev))
else:
patience -= 1
if(patience == 0): break
if(best_dev == 1.0): break
except KeyboardInterrupt:
if not constant.grid_search:
print("KEYBOARD INTERRUPT: Save CKPT and Eval")
save = True if input('Save ckpt? (y/n)\t') in ['y', 'Y', 'yes', 'Yes'] else False
if save:
save_path = save_ckpt(model, opt, e)
print("Saved CKPT path: ", save_path)
print("BEST SCORES - DEV F1: {:.4f}, TEST F1: {:.4f}".format(best_dev, best_test))
exit(1)
print("BEST SCORES - DEV F1: {:.4f}, TEST F1: {:.4f}".format(best_dev, best_test))
def eval_sentiment(model, dataloader):
model.eval()
preds = []
golds = []
with torch.no_grad():
for batch in dataloader:
if constant.use_bert:
input_ids, input_masks, segment_ids, sentiments = batch
logits = model((input_ids, segment_ids, input_masks)).squeeze()
else:
sentences, lens, sentiments = batch
logits = model(sentences, lens).squeeze()
pred = logits > 0.5
preds.append(pred.detach().cpu().numpy())
golds.append(sentiments.cpu().numpy())
preds = np.concatenate(preds)
golds = np.concatenate(golds)
f1 = f1_score(golds, preds, average='weighted')
# _, _, _, microF1 = get_metrics(pred, gold, verbose=False if constant.grid_search else True)
return f1
| avg_line_length: 37.417355 | max_line_length: 142 | alphanum_fraction: 0.564108 |
| hexsha: 0fcc8436befac877fd08225ab4c8a7b9b58ebd30 | size: 2,159 | ext: py | lang: Python |
| max_stars: test.py @ PanPapag/Context-Aware-Crowd-Counting (66a7f10c8e232d8b1f98389c240ba55b7b1d1297) | licenses: ["MIT"] | stars: 5 | 2020-11-22T08:07:22.000Z to 2021-01-13T09:30:15.000Z |
| max_issues: test.py @ PanPapag/Context-Aware-Crowd-Counting (66a7f10c8e232d8b1f98389c240ba55b7b1d1297) | licenses: ["MIT"] | issues: 1 | 2021-03-04T10:32:26.000Z to 2021-03-04T10:32:26.000Z |
| max_forks: test.py @ PanPapag/Context-Aware-Crowd-Counting (66a7f10c8e232d8b1f98389c240ba55b7b1d1297) | licenses: ["MIT"] | forks: null | dates: null |
import argparse
import os
import matplotlib.pyplot as plt
import matplotlib.cm as CM
import torch
import torch.nn as nn
from torchvision import transforms
from model import CANNet
from dataset import ShanghaiTechPartA
def make_args_parser():
# create an ArgumentParser object
parser = argparse.ArgumentParser()
# fill parser with information about program arguments
    parser.add_argument('-r', '--root', type=str,
                        default='/Users/pantelis/Downloads/archive/ShanghaiTech/part_A',
                        help='define the root path to dataset')
    parser.add_argument('-d', '--device', type=str,
                        choices=['cuda', 'cpu'],
                        default='cpu',
                        help='define the device to train/test the model')
    parser.add_argument('-c', '--checkpoint', type=str,
                        help='define the model\'s checkpoint')
    parser.add_argument('-i', '--index', type=int,
                        help='define a random image index')
    parser.add_argument('-b', '--batch_size', type=int, default=1,
                        help='define the test-time batch size')
# return an ArgumentParser object
return parser.parse_args()
def predict_density_map(test_root, checkpoint_path, device, index):
model = CANNet().to(device)
model.load_state_dict(torch.load(checkpoint_path))
test_loader = torch.utils.data.DataLoader(
ShanghaiTechPartA(test_root,
transform=transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
), downsample=8),
batch_size=args.batch_size
)
model.eval()
for i, (img, density_map) in enumerate(test_loader):
if i == index:
img = img.to(device)
density_map = density_map.to(device)
est_density_map = model(img).detach()
est_density_map = est_density_map.squeeze(0).squeeze(0).cpu().numpy()
plt.imshow(est_density_map, cmap=CM.jet)
break
if __name__ == "__main__":
args = make_args_parser()
test_root = os.path.join(args.root, 'test_data', 'images')
predict_density_map(test_root, args.checkpoint, args.device, args.index)
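# Hypothetical invocation (paths, checkpoint name and index are placeholders):
#
#     python test.py --root /data/ShanghaiTech/part_A --device cpu \
#                    --checkpoint checkpoints/cannet_best.pth --index 7 --batch_size 1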
| avg_line_length: 37.877193 | max_line_length: 88 | alphanum_fraction: 0.622974 |
| hexsha: ef6319b8f8bbaa62dd36532865bb6043aab2b16f | size: 5,964 | ext: py | lang: Python |
| max_stars: ocs_ci/ocs/ui/validation_ui.py @ keemano/ocs-ci (643ce73aceef3f92b695fda1bafdea83f6fb0402) | licenses: ["MIT"] | stars: null | dates: null |
| max_issues: ocs_ci/ocs/ui/validation_ui.py @ keemano/ocs-ci (643ce73aceef3f92b695fda1bafdea83f6fb0402) | licenses: ["MIT"] | issues: null | dates: null |
| max_forks: ocs_ci/ocs/ui/validation_ui.py @ keemano/ocs-ci (643ce73aceef3f92b695fda1bafdea83f6fb0402) | licenses: ["MIT"] | forks: null | dates: null |
import logging
from ocs_ci.ocs.ui.base_ui import PageNavigator
from ocs_ci.ocs.ui.views import locators
from ocs_ci.utility.utils import get_ocp_version, TimeoutSampler
from ocs_ci.framework import config
from ocs_ci.ocs import constants
logger = logging.getLogger(__name__)
class ValidationUI(PageNavigator):
"""
User Interface Validation Selenium
"""
def __init__(self, driver):
super().__init__(driver)
self.ocp_version = get_ocp_version()
self.err_list = list()
self.validation_loc = locators[self.ocp_version]["validation"]
def verify_object_service_page(self):
"""
Verify Object Service Page UI
"""
self.navigate_overview_page()
self.do_click(self.validation_loc["object_service_tab"], enable_screenshot=True)
platform = config.ENV_DATA.get("platform").lower()
if platform in constants.ON_PREM_PLATFORMS:
logger.info("Click on Object Service button")
self.do_click(
self.validation_loc["object_service_button"], enable_screenshot=True
)
logger.info("Click on Data Resiliency button")
self.do_click(
self.validation_loc["data_resiliency_button"], enable_screenshot=True
)
strings_object_service_tab = ["Total Reads", "Total Writes"]
self.verify_page_contain_strings(
strings_on_page=strings_object_service_tab, page_name="object_service"
)
def verify_persistent_storage_page(self):
"""
Verify Persistent Storage Page
"""
self.navigate_overview_page()
self.do_click(
self.validation_loc["persistent_storage_tab"], enable_screenshot=True
)
        strings_persistent_storage_tab = [
"IOPS",
"Latency",
"Throughput",
"Recovery",
"Utilization",
"Used Capacity Breakdown",
"Raw Capacity",
]
self.verify_page_contain_strings(
            strings_on_page=strings_persistent_storage_tab, page_name="persistent_storage"
)
def verify_ocs_operator_tabs(self):
"""
Verify OCS Operator Tabs
"""
self.navigate_installed_operators_page()
logger.info("Search OCS operator installed")
self.do_send_keys(
locator=self.validation_loc["search_ocs_installed"],
text="OpenShift Container Storage",
)
logger.info("Click on ocs operator on Installed Operators")
self.do_click(
locator=self.validation_loc["ocs_operator_installed"],
enable_screenshot=True,
)
logger.info("Verify Details tab on OCS operator")
strings_details_tab = ["Description", "Succeeded", "openshift-storage"]
self.verify_page_contain_strings(
strings_on_page=strings_details_tab, page_name="details_tab"
)
logger.info("Verify Subscription tab on OCS operator")
self.do_click(
self.validation_loc["osc_subscription_tab"], enable_screenshot=True
)
strings_subscription_tab = [
"Healthy",
"openshift-storage",
]
self.verify_page_contain_strings(
strings_on_page=strings_subscription_tab, page_name="subscription_tab"
)
logger.info("Verify All instances tab on OCS operator")
self.do_click(
self.validation_loc["osc_all_instances_tab"], enable_screenshot=True
)
strings_all_instances_tab = ["Phase", "Ready", "Status"]
self.verify_page_contain_strings(
strings_on_page=strings_all_instances_tab, page_name="all_instances_tab"
)
logger.info("Verify Storage Cluster tab on OCS operator")
self.do_click(
self.validation_loc["osc_storage_cluster_tab"], enable_screenshot=True
)
strings_storage_cluster_tab = ["Phase", "Ready", "Status"]
self.verify_page_contain_strings(
strings_on_page=strings_storage_cluster_tab, page_name="storage_cluster_tab"
)
logger.info("Verify Backing Store tab on OCS operator")
self.do_click(
self.validation_loc["osc_backing_store_tab"], enable_screenshot=True
)
strings_backing_store_tab = ["Phase", "Ready", "Status"]
self.verify_page_contain_strings(
strings_on_page=strings_backing_store_tab, page_name="backing_store_tab"
)
logger.info("Verify Bucket Class tab on OCS operator")
self.do_click(
self.validation_loc["osc_bucket_class_tab"], enable_screenshot=True
)
strings_bucket_class_tab = ["Phase", "Ready", "Status"]
self.verify_page_contain_strings(
strings_on_page=strings_bucket_class_tab, page_name="bucket_class_tab"
)
def verify_page_contain_strings(self, strings_on_page, page_name):
"""
Verify Page Contain Strings
Args:
strings_on_page (list): list of strings on page
page_name (str): the name of the page
"""
logger.info(f"verify {strings_on_page} exist on {page_name}")
for string in strings_on_page:
sample = TimeoutSampler(
timeout=3,
sleep=1,
func=self.check_element_text,
expected_text=string,
)
if not sample.wait_for_func_status(result=True):
self.err_list.append(f"{string} string not found on {page_name}")
def verification_ui(self):
"""
Verification UI
"""
self.verify_object_service_page()
self.verify_persistent_storage_page()
self.verify_ocs_operator_tabs()
self.take_screenshot()
for err in self.err_list:
logger.error(err)
assert len(self.err_list) == 0, f"{self.err_list}"
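# A minimal usage sketch (not part of the original module): with a Selenium `driver`
# supplied by the ocs-ci UI framework, the checks above are typically driven end to
# end via
#   ValidationUI(driver).verification_ui()
# which runs all three page verifications and asserts that no expected string was missing.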
| 34.674419
| 88
| 0.635983
|
2a8b2ccc049d3be77d8050739266608491b2ff94
| 17,067
|
py
|
Python
|
superset/views/utils.py
|
amitmiran137/incubator-superset
|
8593a13f00754973117beb0a95e7aca7fec4b00e
|
[
"Apache-2.0"
] | 30
|
2020-10-24T02:30:31.000Z
|
2022-03-29T23:21:48.000Z
|
superset/views/utils.py
|
amitmiran137/incubator-superset
|
8593a13f00754973117beb0a95e7aca7fec4b00e
|
[
"Apache-2.0"
] | 4
|
2021-03-02T01:53:30.000Z
|
2021-10-06T22:56:01.000Z
|
superset/views/utils.py
|
aerhuasi/superset
|
b4cd57b7bd2d87092ea50cfb2a38f27458195bf4
|
[
"Apache-2.0"
] | 9
|
2020-12-21T16:20:20.000Z
|
2022-03-18T06:04:37.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from collections import defaultdict
from datetime import date
from typing import Any, Callable, DefaultDict, Dict, List, Optional, Set, Tuple, Union
from urllib import parse
import msgpack
import pyarrow as pa
import simplejson as json
from flask import g, request
from flask_appbuilder.security.sqla import models as ab_models
from flask_appbuilder.security.sqla.models import User
import superset.models.core as models
from superset import app, dataframe, db, is_feature_enabled, result_set
from superset.connectors.connector_registry import ConnectorRegistry
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetException, SupersetSecurityException
from superset.legacy import update_time_range
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.sql_lab import Query
from superset.typing import FormData
from superset.utils.core import QueryStatus, TimeRangeEndpoint
from superset.utils.decorators import stats_timing
from superset.viz import BaseViz
logger = logging.getLogger(__name__)
stats_logger = app.config["STATS_LOGGER"]
if is_feature_enabled("SIP_38_VIZ_REARCHITECTURE"):
from superset import viz_sip38 as viz
else:
from superset import viz # type: ignore
REJECTED_FORM_DATA_KEYS: List[str] = []
if not app.config["ENABLE_JAVASCRIPT_CONTROLS"]:
REJECTED_FORM_DATA_KEYS = ["js_tooltip", "js_onclick_href", "js_data_mutator"]
def bootstrap_user_data(user: User, include_perms: bool = False) -> Dict[str, Any]:
if user.is_anonymous:
return {}
payload = {
"username": user.username,
"firstName": user.first_name,
"lastName": user.last_name,
"userId": user.id,
"isActive": user.is_active,
"createdOn": user.created_on.isoformat(),
"email": user.email,
}
if include_perms:
roles, permissions = get_permissions(user)
payload["roles"] = roles
payload["permissions"] = permissions
return payload
def get_permissions(
user: User,
) -> Tuple[Dict[str, List[List[str]]], DefaultDict[str, Set[str]]]:
if not user.roles:
raise AttributeError("User object does not have roles")
roles = {}
permissions = defaultdict(set)
for role in user.roles:
perms = set()
for perm in role.permissions:
if perm.permission and perm.view_menu:
perms.add((perm.permission.name, perm.view_menu.name))
if perm.permission.name in ("datasource_access", "database_access"):
permissions[perm.permission.name].add(perm.view_menu.name)
roles[role.name] = [
[perm.permission.name, perm.view_menu.name]
for perm in role.permissions
if perm.permission and perm.view_menu
]
return roles, permissions
def get_viz(
form_data: FormData, datasource_type: str, datasource_id: int, force: bool = False
) -> BaseViz:
viz_type = form_data.get("viz_type", "table")
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
viz_obj = viz.viz_types[viz_type](datasource, form_data=form_data, force=force)
return viz_obj
def get_form_data(
slice_id: Optional[int] = None, use_slice_data: bool = False
) -> Tuple[Dict[str, Any], Optional[Slice]]:
form_data = {}
request_form_data = request.form.get("form_data")
request_args_data = request.args.get("form_data")
if request_form_data:
form_data.update(json.loads(request_form_data))
# request params can overwrite the body
if request_args_data:
form_data.update(json.loads(request_args_data))
# Fallback to using the Flask globals (used for cache warmup) if defined.
if not form_data and hasattr(g, "form_data"):
form_data = getattr(g, "form_data")
url_id = request.args.get("r")
if url_id:
saved_url = db.session.query(models.Url).filter_by(id=url_id).first()
if saved_url:
url_str = parse.unquote_plus(
saved_url.url.split("?")[1][10:], encoding="utf-8"
)
url_form_data = json.loads(url_str)
            # allow form_data in the request to override the saved url
url_form_data.update(form_data)
form_data = url_form_data
form_data = {k: v for k, v in form_data.items() if k not in REJECTED_FORM_DATA_KEYS}
# When a slice_id is present, load from DB and override
# the form_data from the DB with the other form_data provided
slice_id = form_data.get("slice_id") or slice_id
slc = None
# Check if form data only contains slice_id, additional filters and viz type
valid_keys = ["slice_id", "extra_filters", "adhoc_filters", "viz_type"]
valid_slice_id = all(key in valid_keys for key in form_data)
# Include the slice_form_data if request from explore or slice calls
# or if form_data only contains slice_id and additional filters
if slice_id and (use_slice_data or valid_slice_id):
slc = db.session.query(Slice).filter_by(id=slice_id).one_or_none()
if slc:
slice_form_data = slc.form_data.copy()
slice_form_data.update(form_data)
form_data = slice_form_data
update_time_range(form_data)
if app.config["SIP_15_ENABLED"]:
form_data["time_range_endpoints"] = get_time_range_endpoints(
form_data, slc, slice_id
)
return form_data, slc
def get_datasource_info(
datasource_id: Optional[int], datasource_type: Optional[str], form_data: FormData
) -> Tuple[int, Optional[str]]:
"""
Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directory, now they should come as part of the form_data,
This function allows supporting both without duplicating code
:param datasource_id: The datasource ID
:param datasource_type: The datasource type, i.e., 'druid' or 'table'
:param form_data: The URL form data
:returns: The datasource ID and type
:raises SupersetException: If the datasource no longer exists
"""
datasource = form_data.get("datasource", "")
if "__" in datasource:
datasource_id, datasource_type = datasource.split("__")
# The case where the datasource has been deleted
if datasource_id == "None":
datasource_id = None
if not datasource_id:
raise SupersetException(
"The datasource associated with this chart no longer exists"
)
datasource_id = int(datasource_id)
return datasource_id, datasource_type
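# Illustrative example (not part of the original module): with
# form_data = {"datasource": "3__table"} the helper above returns (3, "table"),
# whereas a deleted datasource encoded as "None__table" raises SupersetException.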
def apply_display_max_row_limit(
sql_results: Dict[str, Any], rows: Optional[int] = None
) -> Dict[str, Any]:
"""
Given a `sql_results` nested structure, applies a limit to the number of rows
`sql_results` here is the nested structure coming out of sql_lab.get_sql_results, it
contains metadata about the query, as well as the data set returned by the query.
    This method limits the number of rows and adds a `displayLimitReached: True` flag to the
metadata.
:param sql_results: The results of a sql query from sql_lab.get_sql_results
:returns: The mutated sql_results structure
"""
display_limit = rows or app.config["DISPLAY_MAX_ROW"]
if (
display_limit
and sql_results["status"] == QueryStatus.SUCCESS
and display_limit < sql_results["query"]["rows"]
):
sql_results["data"] = sql_results["data"][:display_limit]
sql_results["displayLimitReached"] = True
return sql_results
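# Illustrative example (not part of the original module): if DISPLAY_MAX_ROW is 2 and a
# successful query returned 5 rows, the helper above truncates sql_results["data"] to the
# first 2 rows and sets sql_results["displayLimitReached"] = True.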
def get_time_range_endpoints(
form_data: FormData, slc: Optional[Slice] = None, slice_id: Optional[int] = None
) -> Optional[Tuple[TimeRangeEndpoint, TimeRangeEndpoint]]:
"""
Get the slice aware time range endpoints from the form-data falling back to the SQL
database specific definition or default if not defined.
Note under certain circumstances the slice object may not exist, however the slice
ID may be defined which serves as a fallback.
When SIP-15 is enabled all new slices will use the [start, end) interval. If the
grace period is defined and has ended all slices will adhere to the [start, end)
interval.
:param form_data: The form-data
:param slc: The slice
:param slice_id: The slice ID
:returns: The time range endpoints tuple
"""
if (
app.config["SIP_15_GRACE_PERIOD_END"]
and date.today() >= app.config["SIP_15_GRACE_PERIOD_END"]
):
return (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE)
endpoints = form_data.get("time_range_endpoints")
if (slc or slice_id) and not endpoints:
try:
_, datasource_type = get_datasource_info(None, None, form_data)
except SupersetException:
return None
if datasource_type == "table":
if not slc:
slc = db.session.query(Slice).filter_by(id=slice_id).one_or_none()
if slc:
endpoints = slc.datasource.database.get_extra().get(
"time_range_endpoints"
)
if not endpoints:
endpoints = app.config["SIP_15_DEFAULT_TIME_RANGE_ENDPOINTS"]
if endpoints:
start, end = endpoints
return (TimeRangeEndpoint(start), TimeRangeEndpoint(end))
return (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE)
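# Illustrative example (not part of the original module): once the SIP-15 grace period
# has ended, the helper always returns
# (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE); before that, a two-element
# value such as form_data["time_range_endpoints"] = ["inclusive", "exclusive"] is coerced
# element-wise through TimeRangeEndpoint(...).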
# see all dashboard components type in
# /superset-frontend/src/dashboard/util/componentTypes.js
CONTAINER_TYPES = ["COLUMN", "GRID", "TABS", "TAB", "ROW"]
def get_dashboard_extra_filters(
slice_id: int, dashboard_id: int
) -> List[Dict[str, Any]]:
session = db.session()
dashboard = session.query(Dashboard).filter_by(id=dashboard_id).one_or_none()
# is chart in this dashboard?
if (
dashboard is None
or not dashboard.json_metadata
or not dashboard.slices
or not any([slc for slc in dashboard.slices if slc.id == slice_id])
):
return []
try:
# does this dashboard have default filters?
json_metadata = json.loads(dashboard.json_metadata)
default_filters = json.loads(json_metadata.get("default_filters", "null"))
if not default_filters:
return []
# are default filters applicable to the given slice?
filter_scopes = json_metadata.get("filter_scopes", {})
layout = json.loads(dashboard.position_json or "{}")
if (
isinstance(layout, dict)
and isinstance(filter_scopes, dict)
and isinstance(default_filters, dict)
):
return build_extra_filters(layout, filter_scopes, default_filters, slice_id)
except json.JSONDecodeError:
pass
return []
def build_extra_filters(
layout: Dict[str, Dict[str, Any]],
filter_scopes: Dict[str, Dict[str, Any]],
default_filters: Dict[str, Dict[str, List[Any]]],
slice_id: int,
) -> List[Dict[str, Any]]:
extra_filters = []
# do not apply filters if chart is not in filter's scope or
# chart is immune to the filter
for filter_id, columns in default_filters.items():
scopes_by_filter_field = filter_scopes.get(filter_id, {})
for col, val in columns.items():
current_field_scopes = scopes_by_filter_field.get(col, {})
scoped_container_ids = current_field_scopes.get("scope", ["ROOT_ID"])
immune_slice_ids = current_field_scopes.get("immune", [])
for container_id in scoped_container_ids:
if slice_id not in immune_slice_ids and is_slice_in_container(
layout, container_id, slice_id
):
extra_filters.append({"col": col, "op": "in", "val": val})
return extra_filters
def is_slice_in_container(
layout: Dict[str, Dict[str, Any]], container_id: str, slice_id: int
) -> bool:
if container_id == "ROOT_ID":
return True
node = layout[container_id]
node_type = node.get("type")
if node_type == "CHART" and node.get("meta", {}).get("chartId") == slice_id:
return True
if node_type in CONTAINER_TYPES:
children = node.get("children", [])
return any(
is_slice_in_container(layout, child_id, slice_id) for child_id in children
)
return False
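# Illustrative example (not part of the original module): given a layout such as
#   {"TAB-1": {"type": "TAB", "children": ["CHART-1"]},
#    "CHART-1": {"type": "CHART", "meta": {"chartId": 42}}}
# is_slice_in_container(layout, "TAB-1", 42) returns True, and every slice is treated
# as belonging to the special "ROOT_ID" container.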
def is_owner(obj: Union[Dashboard, Slice], user: User) -> bool:
""" Check if user is owner of the slice """
return obj and user in obj.owners
def check_datasource_perms(
_self: Any,
datasource_type: Optional[str] = None,
datasource_id: Optional[int] = None,
) -> None:
"""
Check if user can access a cached response from explore_json.
    This function takes `self` since it must have the same signature as the
    decorated method.
:param datasource_type: The datasource type, i.e., 'druid' or 'table'
:param datasource_id: The datasource ID
:raises SupersetSecurityException: If the user cannot access the resource
"""
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException as ex:
raise SupersetSecurityException(
SupersetError(
error_type=SupersetErrorType.FAILED_FETCHING_DATASOURCE_INFO_ERROR,
level=ErrorLevel.ERROR,
message=str(ex),
)
)
if datasource_type is None:
raise SupersetSecurityException(
SupersetError(
error_type=SupersetErrorType.UNKNOWN_DATASOURCE_TYPE_ERROR,
level=ErrorLevel.ERROR,
message="Could not determine datasource type",
)
)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
viz_obj.raise_for_access()
def check_slice_perms(_self: Any, slice_id: int) -> None:
"""
Check if user can access a cached response from slice_json.
    This function takes `self` since it must have the same signature as the
    decorated method.
:param slice_id: The slice ID
:raises SupersetSecurityException: If the user cannot access the resource
"""
form_data, slc = get_form_data(slice_id, use_slice_data=True)
if slc:
viz_obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=False,
)
viz_obj.raise_for_access()
def _deserialize_results_payload(
payload: Union[bytes, str], query: Query, use_msgpack: Optional[bool] = False
) -> Dict[str, Any]:
logger.debug("Deserializing from msgpack: %r", use_msgpack)
if use_msgpack:
with stats_timing(
"sqllab.query.results_backend_msgpack_deserialize", stats_logger
):
ds_payload = msgpack.loads(payload, raw=False)
with stats_timing("sqllab.query.results_backend_pa_deserialize", stats_logger):
pa_table = pa.deserialize(ds_payload["data"])
df = result_set.SupersetResultSet.convert_table_to_df(pa_table)
ds_payload["data"] = dataframe.df_to_records(df) or []
db_engine_spec = query.database.db_engine_spec
all_columns, data, expanded_columns = db_engine_spec.expand_data(
ds_payload["selected_columns"], ds_payload["data"]
)
ds_payload.update(
{"data": data, "columns": all_columns, "expanded_columns": expanded_columns}
)
return ds_payload
with stats_timing("sqllab.query.results_backend_json_deserialize", stats_logger):
return json.loads(payload)
def get_cta_schema_name(
database: Database, user: ab_models.User, schema: str, sql: str
) -> Optional[str]:
func: Optional[Callable[[Database, ab_models.User, str, str], str]] = app.config[
"SQLLAB_CTAS_SCHEMA_NAME_FUNC"
]
if not func:
return None
return func(database, user, schema, sql)
| 34.478788
| 88
| 0.680553
|
0cd02843b299f94598bad77525945a9d1d44d1d9
| 36,377
|
py
|
Python
|
source/gui/__init__.py
|
ruifontes/nvda
|
8d8f60639e92627184151e2080052c7ec2730500
|
[
"bzip2-1.0.6"
] | 1,592
|
2015-11-10T12:05:44.000Z
|
2022-03-31T11:50:40.000Z
|
source/gui/__init__.py
|
ruifontes/nvda
|
8d8f60639e92627184151e2080052c7ec2730500
|
[
"bzip2-1.0.6"
] | 9,479
|
2015-11-10T20:56:48.000Z
|
2022-03-31T23:51:30.000Z
|
source/gui/__init__.py
|
ruifontes/nvda
|
8d8f60639e92627184151e2080052c7ec2730500
|
[
"bzip2-1.0.6"
] | 682
|
2015-11-10T11:19:23.000Z
|
2022-03-31T07:51:29.000Z
|
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2021 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Mesar Hameed, Joseph Lee,
# Thomas Stivers, Babbage B.V., Accessolutions, Julien Cochuyt
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import time
import os
import sys
import threading
import ctypes
import weakref
import wx
import wx.adv
import globalVars
import tones
import ui
from documentationUtils import getDocFilePath
from logHandler import log
import config
import versionInfo
import speech
import queueHandler
import core
from . import guiHelper
from .settingsDialogs import SettingsDialog
from .settingsDialogs import *
from .startupDialogs import WelcomeDialog
from .inputGestures import InputGesturesDialog
import speechDictHandler
from . import logViewer
import speechViewer
import winUser
import api
try:
import updateCheck
except RuntimeError:
updateCheck = None
### Constants
NVDA_PATH = globalVars.appDir
ICON_PATH=os.path.join(NVDA_PATH, "images", "nvda.ico")
DONATE_URL = "http://www.nvaccess.org/donate/"
### Globals
mainFrame = None
isInMessageBox = False
class MainFrame(wx.Frame):
def __init__(self):
style = wx.DEFAULT_FRAME_STYLE ^ wx.MAXIMIZE_BOX ^ wx.MINIMIZE_BOX | wx.FRAME_NO_TASKBAR
super(MainFrame, self).__init__(None, wx.ID_ANY, versionInfo.name, size=(1,1), style=style)
self.Bind(wx.EVT_CLOSE, self.onExitCommand)
self.sysTrayIcon = SysTrayIcon(self)
#: The focus before the last popup or C{None} if unknown.
#: This is only valid before L{prePopup} is called,
#: so it should be used as early as possible in any popup that needs it.
#: @type: L{NVDAObject}
self.prevFocus = None
#: The focus ancestors before the last popup or C{None} if unknown.
#: @type: list of L{NVDAObject}
self.prevFocusAncestors = None
# If NVDA has the uiAccess privilege, it can always set the foreground window.
import systemUtils
if not systemUtils.hasUiAccess():
# This makes Windows return to the previous foreground window and also seems to allow NVDA to be brought to the foreground.
self.Show()
self.Hide()
if winUser.isWindowVisible(self.Handle):
# HACK: Work around a wx bug where Hide() doesn't actually hide the window,
# but IsShown() returns False and Hide() again doesn't fix it.
# This seems to happen if the call takes too long.
self.Show()
self.Hide()
def prePopup(self):
"""Prepare for a popup.
This should be called before any dialog or menu which should pop up for the user.
L{postPopup} should be called after the dialog or menu has been shown.
@postcondition: A dialog or menu may be shown.
"""
nvdaPid = os.getpid()
focus = api.getFocusObject()
# Do not set prevFocus if the focus is on a control rendered by NVDA itself, such as the NVDA menu.
# This allows to refer to the control that had focus before opening the menu while still using NVDA
# on its own controls. The L{nvdaPid} check can be bypassed by setting the optional attribute
		# L{isPrevFocusOnNvdaPopup} to L{True} when an NVDA dialog offers customizable bound gestures,
		# e.g. the NVDA Python Console.
if focus.processID != nvdaPid or getattr(focus, "isPrevFocusOnNvdaPopup", False):
self.prevFocus = focus
self.prevFocusAncestors = api.getFocusAncestors()
if winUser.getWindowThreadProcessID(winUser.getForegroundWindow())[0] != nvdaPid:
# This process is not the foreground process, so bring it to the foreground.
self.Raise()
def postPopup(self):
"""Clean up after a popup dialog or menu.
This should be called after a dialog or menu was popped up for the user.
"""
self.prevFocus = None
self.prevFocusAncestors = None
if not winUser.isWindowVisible(winUser.getForegroundWindow()):
# The current foreground window is invisible, so we want to return to the previous foreground window.
# Showing and hiding our main window seems to achieve this.
self.Show()
self.Hide()
def showGui(self):
# The menu pops up at the location of the mouse, which means it pops up at an unpredictable location.
# Therefore, move the mouse to the center of the screen so that the menu will always pop up there.
location = api.getDesktopObject().location
winUser.setCursorPos(*location.center)
self.evaluateUpdatePendingUpdateMenuItemCommand()
self.sysTrayIcon.onActivate(None)
def onRevertToSavedConfigurationCommand(self,evt):
queueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration)
# Translators: Reported when last saved configuration has been applied by using revert to saved configuration option in NVDA menu.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration applied"))
def onRevertToDefaultConfigurationCommand(self,evt):
queueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration,factoryDefaults=True)
# Translators: Reported when configuration has been restored to defaults by using restore configuration to factory defaults item in NVDA menu.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration restored to factory defaults"))
def onSaveConfigurationCommand(self,evt):
if globalVars.appArgs.secure:
# Translators: Reported when current configuration cannot be saved while NVDA is running in secure mode such as in Windows login screen.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Cannot save configuration - NVDA in secure mode"))
return
try:
config.conf.save()
# Translators: Reported when current configuration has been saved.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration saved"))
except:
# Translators: Message shown when current configuration cannot be saved such as when running NVDA from a CD.
messageBox(_("Could not save configuration - probably read only file system"),_("Error"),wx.OK | wx.ICON_ERROR)
def _popupSettingsDialog(self, dialog, *args, **kwargs):
if isInMessageBox:
return
self.prePopup()
try:
dialog(self, *args, **kwargs).Show()
except SettingsDialog.MultiInstanceErrorWithDialog as errorWithDialog:
errorWithDialog.dialog.SetFocus()
except MultiCategorySettingsDialog.CategoryUnavailableError:
# Translators: Message shown when trying to open an unavailable category of a multi category settings dialog
# (example: when trying to open touch interaction settings on an unsupported system).
messageBox(_("The settings panel you tried to open is unavailable on this system."),_("Error"),style=wx.OK | wx.ICON_ERROR)
self.postPopup()
def onDefaultDictionaryCommand(self,evt):
# Translators: Title for default speech dictionary dialog.
self._popupSettingsDialog(DictionaryDialog,_("Default dictionary"),speechDictHandler.dictionaries["default"])
def onVoiceDictionaryCommand(self,evt):
# Translators: Title for voice dictionary for the current voice such as current eSpeak variant.
self._popupSettingsDialog(DictionaryDialog,_("Voice dictionary (%s)")%speechDictHandler.dictionaries["voice"].fileName,speechDictHandler.dictionaries["voice"])
def onTemporaryDictionaryCommand(self,evt):
# Translators: Title for temporary speech dictionary dialog (the voice dictionary that is active as long as NvDA is running).
self._popupSettingsDialog(DictionaryDialog,_("Temporary dictionary"),speechDictHandler.dictionaries["temp"])
def onExecuteUpdateCommand(self, evt):
if updateCheck and updateCheck.isPendingUpdate():
destPath, version, apiVersion, backCompatToAPIVersion = updateCheck.getPendingUpdate()
from addonHandler import getIncompatibleAddons
if any(getIncompatibleAddons(apiVersion, backCompatToAPIVersion)):
confirmUpdateDialog = updateCheck.UpdateAskInstallDialog(
parent=gui.mainFrame,
destPath=destPath,
version=version,
apiVersion=apiVersion,
backCompatTo=backCompatToAPIVersion
)
gui.runScriptModalDialog(confirmUpdateDialog)
else:
updateCheck.executePendingUpdate()
def evaluateUpdatePendingUpdateMenuItemCommand(self):
try:
self.sysTrayIcon.menu.Remove(self.sysTrayIcon.installPendingUpdateMenuItem)
except:
log.debug("Error while removing pending update menu item", exc_info=True)
pass
if not globalVars.appArgs.secure and updateCheck and updateCheck.isPendingUpdate():
self.sysTrayIcon.menu.Insert(self.sysTrayIcon.installPendingUpdateMenuItemPos,self.sysTrayIcon.installPendingUpdateMenuItem)
def onExitCommand(self, evt):
if config.conf["general"]["askToExit"]:
self.prePopup()
d = ExitDialog(self)
d.Raise()
d.Show()
self.postPopup()
else:
if not core.triggerNVDAExit():
log.error("NVDA already in process of exiting, this indicates a logic error.")
def onNVDASettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog)
def onGeneralSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, GeneralSettingsPanel)
def onSelectSynthesizerCommand(self,evt):
self._popupSettingsDialog(SynthesizerSelectionDialog)
def onSpeechSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, SpeechSettingsPanel)
def onSelectBrailleDisplayCommand(self,evt):
self._popupSettingsDialog(BrailleDisplaySelectionDialog)
def onBrailleSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, BrailleSettingsPanel)
def onKeyboardSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, KeyboardSettingsPanel)
def onMouseSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, MouseSettingsPanel)
def onTouchInteractionCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, TouchInteractionPanel)
def onReviewCursorCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, ReviewCursorPanel)
def onInputCompositionCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, InputCompositionPanel)
def onObjectPresentationCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, ObjectPresentationPanel)
def onBrowseModeCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, BrowseModePanel)
def onDocumentFormattingCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, DocumentFormattingPanel)
def onUwpOcrCommand(self, evt):
self._popupSettingsDialog(NVDASettingsDialog, UwpOcrPanel)
def onSpeechSymbolsCommand(self, evt):
self._popupSettingsDialog(SpeechSymbolsDialog)
def onInputGesturesCommand(self, evt):
self._popupSettingsDialog(InputGesturesDialog)
def onAboutCommand(self,evt):
# Translators: The title of the dialog to show about info for NVDA.
messageBox(versionInfo.aboutMessage, _("About NVDA"), wx.OK)
def onCheckForUpdateCommand(self, evt):
updateCheck.UpdateChecker().check()
def onViewLogCommand(self, evt):
logViewer.activate()
def onSpeechViewerEnabled(self, isEnabled):
		# it's possible for this to be called after the sysTrayIcon is destroyed if we are exiting NVDA
if self.sysTrayIcon and self.sysTrayIcon.menu_tools_toggleSpeechViewer:
self.sysTrayIcon.menu_tools_toggleSpeechViewer.Check(isEnabled)
def onToggleSpeechViewerCommand(self, evt):
if not speechViewer.isActive:
speechViewer.activate()
else:
speechViewer.deactivate()
def onBrailleViewerChangedState(self, created):
		# it's possible for this to be called after the sysTrayIcon is destroyed if we are exiting NVDA
if self.sysTrayIcon and self.sysTrayIcon.menu_tools_toggleBrailleViewer:
self.sysTrayIcon.menu_tools_toggleBrailleViewer.Check(created)
def onToggleBrailleViewerCommand(self, evt):
import brailleViewer
if brailleViewer.isBrailleViewerActive():
brailleViewer.destroyBrailleViewer()
else:
brailleViewer.createBrailleViewerTool()
def onPythonConsoleCommand(self, evt):
import pythonConsole
if not pythonConsole.consoleUI:
pythonConsole.initialize()
pythonConsole.activate()
def onAddonsManagerCommand(self,evt):
if isInMessageBox:
return
self.prePopup()
from .addonGui import AddonsDialog
d=AddonsDialog(gui.mainFrame)
d.Show()
self.postPopup()
def onReloadPluginsCommand(self, evt):
import appModuleHandler, globalPluginHandler
from NVDAObjects import NVDAObject
appModuleHandler.reloadAppModules()
globalPluginHandler.reloadGlobalPlugins()
NVDAObject.clearDynamicClassCache()
def onCreatePortableCopyCommand(self,evt):
if isInMessageBox:
return
self.prePopup()
import gui.installerGui
d=gui.installerGui.PortableCreaterDialog(gui.mainFrame)
d.Show()
self.postPopup()
def onInstallCommand(self, evt):
if isInMessageBox:
return
from gui import installerGui
installerGui.showInstallGui()
def onRunCOMRegistrationFixesCommand(self, evt):
if isInMessageBox:
return
if gui.messageBox(
# Translators: A message to warn the user when starting the COM Registration Fixing tool
_("You are about to run the COM Registration Fixing tool. This tool will try to fix common system problems that stop NVDA from being able to access content in many programs including Firefox and Internet Explorer. This tool must make changes to the System registry and therefore requires administrative access. Are you sure you wish to proceed?"),
# Translators: The title of the warning dialog displayed when launching the COM Registration Fixing tool
_("Warning"),wx.YES|wx.NO|wx.ICON_WARNING,self
)==wx.NO:
return
progressDialog = IndeterminateProgressDialog(mainFrame,
# Translators: The title of the dialog presented while NVDA is running the COM Registration fixing tool
_("COM Registration Fixing Tool"),
# Translators: The message displayed while NVDA is running the COM Registration fixing tool
_("Please wait while NVDA tries to fix your system's COM registrations.")
)
try:
import systemUtils
systemUtils.execElevated(config.SLAVE_FILENAME, ["fixCOMRegistrations"])
except:
log.error("Could not execute fixCOMRegistrations command",exc_info=True)
progressDialog.done()
del progressDialog
messageBox(
_(
# Translators: The message displayed when the COM Registration Fixing tool completes.
"The COM Registration Fixing tool has finished. "
"It is highly recommended that you restart your computer now, to make sure the changes take full effect."
),
# Translators: The title of a dialog presented when the COM Registration Fixing tool is complete.
_("COM Registration Fixing Tool"),
wx.OK
)
def onConfigProfilesCommand(self, evt):
if isInMessageBox:
return
self.prePopup()
from .configProfiles import ProfilesDialog
ProfilesDialog(gui.mainFrame).Show()
self.postPopup()
class SysTrayIcon(wx.adv.TaskBarIcon):
def __init__(self, frame: MainFrame):
super(SysTrayIcon, self).__init__()
icon=wx.Icon(ICON_PATH,wx.BITMAP_TYPE_ICO)
self.SetIcon(icon, versionInfo.name)
self.menu=wx.Menu()
menu_preferences=self.preferencesMenu=wx.Menu()
item = menu_preferences.Append(wx.ID_ANY,
# Translators: The label for the menu item to open NVDA Settings dialog.
_("&Settings..."),
# Translators: The description for the menu item to open NVDA Settings dialog.
_("NVDA settings"))
self.Bind(wx.EVT_MENU, frame.onNVDASettingsCommand, item)
subMenu_speechDicts = wx.Menu()
if not globalVars.appArgs.secure:
item = subMenu_speechDicts.Append(
wx.ID_ANY,
# Translators: The label for the menu item to open Default speech dictionary dialog.
_("&Default dictionary..."),
# Translators: The help text for the menu item to open Default speech dictionary dialog.
_("A dialog where you can set default dictionary by adding dictionary entries to the list")
)
self.Bind(wx.EVT_MENU, frame.onDefaultDictionaryCommand, item)
item = subMenu_speechDicts.Append(
wx.ID_ANY,
# Translators: The label for the menu item to open Voice specific speech dictionary dialog.
_("&Voice dictionary..."),
_(
# Translators: The help text for the menu item
# to open Voice specific speech dictionary dialog.
"A dialog where you can set voice-specific dictionary by adding"
" dictionary entries to the list"
)
)
self.Bind(wx.EVT_MENU, frame.onVoiceDictionaryCommand, item)
item = subMenu_speechDicts.Append(
wx.ID_ANY,
# Translators: The label for the menu item to open Temporary speech dictionary dialog.
_("&Temporary dictionary..."),
# Translators: The help text for the menu item to open Temporary speech dictionary dialog.
_("A dialog where you can set temporary dictionary by adding dictionary entries to the edit box")
)
self.Bind(wx.EVT_MENU, frame.onTemporaryDictionaryCommand, item)
# Translators: The label for a submenu under NvDA Preferences menu to select speech dictionaries.
menu_preferences.AppendSubMenu(subMenu_speechDicts,_("Speech &dictionaries"))
if not globalVars.appArgs.secure:
# Translators: The label for the menu item to open Punctuation/symbol pronunciation dialog.
item = menu_preferences.Append(wx.ID_ANY, _("&Punctuation/symbol pronunciation..."))
self.Bind(wx.EVT_MENU, frame.onSpeechSymbolsCommand, item)
# Translators: The label for the menu item to open the Input Gestures dialog.
item = menu_preferences.Append(wx.ID_ANY, _("I&nput gestures..."))
self.Bind(wx.EVT_MENU, frame.onInputGesturesCommand, item)
# Translators: The label for Preferences submenu in NVDA menu.
self.menu.AppendSubMenu(menu_preferences,_("&Preferences"))
menu_tools = self.toolsMenu = wx.Menu()
if not globalVars.appArgs.secure:
# Translators: The label for the menu item to open NVDA Log Viewer.
item = menu_tools.Append(wx.ID_ANY, _("View log"))
self.Bind(wx.EVT_MENU, frame.onViewLogCommand, item)
# Translators: The label for the menu item to toggle Speech Viewer.
item = self.menu_tools_toggleSpeechViewer = menu_tools.AppendCheckItem(wx.ID_ANY, _("Speech viewer"))
item.Check(speechViewer.isActive)
self.Bind(wx.EVT_MENU, frame.onToggleSpeechViewerCommand, item)
self.menu_tools_toggleBrailleViewer: wx.MenuItem = menu_tools.AppendCheckItem(
wx.ID_ANY,
# Translators: The label for the menu item to toggle Braille Viewer.
_("Braille viewer")
)
item = self.menu_tools_toggleBrailleViewer
self.Bind(wx.EVT_MENU, frame.onToggleBrailleViewerCommand, item)
import brailleViewer
self.menu_tools_toggleBrailleViewer.Check(brailleViewer.isBrailleViewerActive())
brailleViewer.postBrailleViewerToolToggledAction.register(frame.onBrailleViewerChangedState)
if not globalVars.appArgs.secure and not config.isAppX:
# Translators: The label for the menu item to open NVDA Python Console.
item = menu_tools.Append(wx.ID_ANY, _("Python console"))
self.Bind(wx.EVT_MENU, frame.onPythonConsoleCommand, item)
# Translators: The label of a menu item to open the Add-ons Manager.
item = menu_tools.Append(wx.ID_ANY, _("Manage &add-ons..."))
self.Bind(wx.EVT_MENU, frame.onAddonsManagerCommand, item)
if not globalVars.appArgs.secure and not config.isAppX and getattr(sys,'frozen',None):
# Translators: The label for the menu item to create a portable copy of NVDA from an installed or another portable version.
item = menu_tools.Append(wx.ID_ANY, _("Create portable copy..."))
self.Bind(wx.EVT_MENU, frame.onCreatePortableCopyCommand, item)
if not config.isInstalledCopy():
# Translators: The label for the menu item to install NVDA on the computer.
item = menu_tools.Append(wx.ID_ANY, _("&Install NVDA..."))
self.Bind(wx.EVT_MENU, frame.onInstallCommand, item)
# Translators: The label for the menu item to run the COM registration fix tool
item = menu_tools.Append(wx.ID_ANY, _("Run COM Registration Fixing tool..."))
self.Bind(wx.EVT_MENU, frame.onRunCOMRegistrationFixesCommand, item)
if not config.isAppX:
# Translators: The label for the menu item to reload plugins.
item = menu_tools.Append(wx.ID_ANY, _("Reload plugins"))
self.Bind(wx.EVT_MENU, frame.onReloadPluginsCommand, item)
# Translators: The label for the Tools submenu in NVDA menu.
self.menu.AppendSubMenu(menu_tools,_("Tools"))
menu_help = self.helpMenu = wx.Menu()
if not globalVars.appArgs.secure:
# Translators: The label of a menu item to open NVDA user guide.
item = menu_help.Append(wx.ID_ANY, _("&User Guide"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("userGuide.html")), item)
# Translators: The label of a menu item to open the Commands Quick Reference document.
item = menu_help.Append(wx.ID_ANY, _("Commands &Quick Reference"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("keyCommands.html")), item)
# Translators: The label for the menu item to open What's New document.
item = menu_help.Append(wx.ID_ANY, _("What's &new"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("changes.html")), item)
item = menu_help.Append(wx.ID_ANY, _("NVDA &web site"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile("http://www.nvda-project.org/"), item)
# Translators: The label for the menu item to view NVDA License document.
item = menu_help.Append(wx.ID_ANY, _("L&icense"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("copying.txt", False)), item)
# Translators: The label for the menu item to view NVDA Contributors list document.
item = menu_help.Append(wx.ID_ANY, _("C&ontributors"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("contributors.txt", False)), item)
# Translators: The label for the menu item to open NVDA Welcome Dialog.
item = menu_help.Append(wx.ID_ANY, _("We&lcome dialog..."))
from .startupDialogs import WelcomeDialog
self.Bind(wx.EVT_MENU, lambda evt: WelcomeDialog.run(), item)
menu_help.AppendSeparator()
if updateCheck:
# Translators: The label of a menu item to manually check for an updated version of NVDA.
item = menu_help.Append(wx.ID_ANY, _("&Check for update..."))
self.Bind(wx.EVT_MENU, frame.onCheckForUpdateCommand, item)
# Translators: The label for the menu item to open About dialog to get information about NVDA.
item = menu_help.Append(wx.ID_ABOUT, _("About..."), _("About NVDA"))
self.Bind(wx.EVT_MENU, frame.onAboutCommand, item)
# Translators: The label for the Help submenu in NVDA menu.
self.menu.AppendSubMenu(menu_help,_("&Help"))
self.menu.AppendSeparator()
# Translators: The label for the menu item to open the Configuration Profiles dialog.
item = self.menu.Append(wx.ID_ANY, _("&Configuration profiles..."))
self.Bind(wx.EVT_MENU, frame.onConfigProfilesCommand, item)
# Translators: The label for the menu item to revert to saved configuration.
item = self.menu.Append(wx.ID_ANY, _("&Revert to saved configuration"),_("Reset all settings to saved state"))
self.Bind(wx.EVT_MENU, frame.onRevertToSavedConfigurationCommand, item)
if not globalVars.appArgs.secure:
# Translators: The label for the menu item to reset settings to default settings.
# Here, default settings means settings that were there when the user first used NVDA.
item = self.menu.Append(wx.ID_ANY, _("&Reset configuration to factory defaults"),_("Reset all settings to default state"))
self.Bind(wx.EVT_MENU, frame.onRevertToDefaultConfigurationCommand, item)
# Translators: The label for the menu item to save current settings.
item = self.menu.Append(wx.ID_SAVE, _("&Save configuration"), _("Write the current configuration to nvda.ini"))
self.Bind(wx.EVT_MENU, frame.onSaveConfigurationCommand, item)
self.menu.AppendSeparator()
# Translators: The label for the menu item to open donate page.
item = self.menu.Append(wx.ID_ANY, _("Donate"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(DONATE_URL), item)
self.installPendingUpdateMenuItemPos = self.menu.GetMenuItemCount()
item = self.installPendingUpdateMenuItem = self.menu.Append(wx.ID_ANY,
# Translators: The label for the menu item to run a pending update.
_("Install pending &update"),
# Translators: The description for the menu item to run a pending update.
_("Execute a previously downloaded NVDA update"))
self.Bind(wx.EVT_MENU, frame.onExecuteUpdateCommand, item)
self.menu.AppendSeparator()
item = self.menu.Append(wx.ID_EXIT, _("E&xit"),_("Exit NVDA"))
self.Bind(wx.EVT_MENU, frame.onExitCommand, item)
self.Bind(wx.adv.EVT_TASKBAR_LEFT_DOWN, self.onActivate)
self.Bind(wx.adv.EVT_TASKBAR_RIGHT_DOWN, self.onActivate)
def onActivate(self, evt):
mainFrame.prePopup()
import appModules.nvda
if not appModules.nvda.nvdaMenuIaIdentity:
# The NVDA app module doesn't know how to identify the NVDA menu yet.
# Signal that the NVDA menu has just been opened.
appModules.nvda.nvdaMenuIaIdentity = True
self.PopupMenu(self.menu)
if appModules.nvda.nvdaMenuIaIdentity is True:
# The NVDA menu didn't actually appear for some reason.
appModules.nvda.nvdaMenuIaIdentity = None
mainFrame.postPopup()
def initialize():
global mainFrame
if mainFrame:
raise RuntimeError("GUI already initialized")
mainFrame = MainFrame()
wxLang = core.getWxLangOrNone()
if wxLang:
# otherwise the system default will be used
mainFrame.SetLayoutDirection(wxLang.LayoutDirection)
wx.GetApp().SetTopWindow(mainFrame)
import monkeyPatches
monkeyPatches.applyWxMonkeyPatches(mainFrame, winUser, wx)
def terminate():
global mainFrame
mainFrame = None
def showGui():
wx.CallAfter(mainFrame.showGui)
def quit():
wx.CallAfter(mainFrame.onExitCommand, None)
def messageBox(message, caption=wx.MessageBoxCaptionStr, style=wx.OK | wx.CENTER, parent=None):
"""Display a message dialog.
This should be used for all message dialogs
rather than using C{wx.MessageDialog} and C{wx.MessageBox} directly.
@param message: The message text.
@type message: str
@param caption: The caption (title) of the dialog.
@type caption: str
@param style: Same as for wx.MessageBox.
@type style: int
@param parent: The parent window (optional).
@type parent: C{wx.Window}
@return: Same as for wx.MessageBox.
@rtype: int
"""
global isInMessageBox
wasAlready = isInMessageBox
isInMessageBox = True
if not parent:
mainFrame.prePopup()
res = wx.MessageBox(message, caption, style, parent or mainFrame)
if not parent:
mainFrame.postPopup()
if not wasAlready:
isInMessageBox = False
return res
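# A minimal usage sketch (not part of the original module), mirroring calls made
# elsewhere in this file:
#   messageBox(_("Could not save configuration"), _("Error"), wx.OK | wx.ICON_ERROR)
# When no parent is given, the dialog is wrapped in mainFrame.prePopup()/postPopup().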
def runScriptModalDialog(dialog, callback=None):
"""Run a modal dialog from a script.
This will not block the caller,
but will instead call C{callback} (if provided) with the result from the dialog.
The dialog will be destroyed once the callback has returned.
@param dialog: The dialog to show.
@type dialog: C{wx.Dialog}
@param callback: The optional callable to call with the result from the dialog.
@type callback: callable
"""
def run():
mainFrame.prePopup()
res = dialog.ShowModal()
mainFrame.postPopup()
if callback:
callback(res)
dialog.Destroy()
wx.CallAfter(run)
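# A minimal usage sketch (not part of the original module); the dialog class and
# callback below are hypothetical names:
#   def _onDone(result):
#       if result == wx.ID_OK:
#           doSomething()
#   runScriptModalDialog(MyDialog(mainFrame), callback=_onDone)
# The caller is not blocked; the dialog is destroyed after the callback returns.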
class ExitDialog(wx.Dialog):
_instance = None
def __new__(cls, parent):
# Make this a singleton.
inst = cls._instance() if cls._instance else None
if not inst:
return super(cls, cls).__new__(cls, parent)
return inst
def __init__(self, parent):
inst = ExitDialog._instance() if ExitDialog._instance else None
if inst:
return
# Use a weakref so the instance can die.
ExitDialog._instance = weakref.ref(self)
# Translators: The title of the dialog to exit NVDA
super(ExitDialog, self).__init__(parent, title=_("Exit NVDA"))
dialog = self
mainSizer = wx.BoxSizer(wx.VERTICAL)
contentSizerHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
if globalVars.appArgs.disableAddons:
# Translators: A message in the exit Dialog shown when all add-ons are disabled.
addonsDisabledText = _("All add-ons are now disabled. They will be re-enabled on the next restart unless you choose to disable them again.")
contentSizerHelper.addItem(wx.StaticText(self, wx.ID_ANY, label=addonsDisabledText))
# Translators: The label for actions list in the Exit dialog.
labelText=_("What would you like to &do?")
self.actions = [
# Translators: An option in the combo box to choose exit action.
_("Exit"),
# Translators: An option in the combo box to choose exit action.
_("Restart")
]
# Windows Store version of NVDA does not support add-ons yet.
if not config.isAppX:
# Translators: An option in the combo box to choose exit action.
self.actions.append(_("Restart with add-ons disabled"))
# Translators: An option in the combo box to choose exit action.
self.actions.append(_("Restart with debug logging enabled"))
if updateCheck and updateCheck.isPendingUpdate():
# Translators: An option in the combo box to choose exit action.
self.actions.append(_("Install pending update"))
self.actionsList = contentSizerHelper.addLabeledControl(labelText, wx.Choice, choices=self.actions)
self.actionsList.SetSelection(0)
contentSizerHelper.addDialogDismissButtons(wx.OK | wx.CANCEL)
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
mainSizer.Add(contentSizerHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.Sizer = mainSizer
self.actionsList.SetFocus()
self.CentreOnScreen()
def onOk(self, evt):
action=self.actionsList.GetSelection()
		# The Windows Store version of NVDA does not support add-ons yet, so if this is that version, add 1 when the chosen action is 2 or above.
if action >= 2 and config.isAppX:
action += 1
if action == 0:
WelcomeDialog.closeInstances()
if not core.triggerNVDAExit():
log.error("NVDA already in process of exiting, this indicates a logic error.")
return # there's no need to destroy ExitDialog in this instance as triggerNVDAExit will do this
elif action == 1:
queueHandler.queueFunction(queueHandler.eventQueue,core.restart)
elif action == 2:
queueHandler.queueFunction(queueHandler.eventQueue,core.restart,disableAddons=True)
elif action == 3:
queueHandler.queueFunction(queueHandler.eventQueue,core.restart,debugLogging=True)
elif action == 4:
if updateCheck:
destPath, version, apiVersion, backCompatTo = updateCheck.getPendingUpdate()
from addonHandler import getIncompatibleAddons
if any(getIncompatibleAddons(currentAPIVersion=apiVersion, backCompatToAPIVersion=backCompatTo)):
confirmUpdateDialog = updateCheck.UpdateAskInstallDialog(
parent=gui.mainFrame,
destPath=destPath,
version=version,
apiVersion=apiVersion,
backCompatTo=backCompatTo
)
confirmUpdateDialog.ShowModal()
else:
updateCheck.executePendingUpdate()
wx.CallAfter(self.Destroy)
def onCancel(self, evt):
self.Destroy()
class ExecAndPump(threading.Thread):
"""Executes the given function with given args and kwargs in a background thread while blocking and pumping in the current thread."""
def __init__(self,func,*args,**kwargs):
self.func=func
self.args=args
self.kwargs=kwargs
fname = repr(func)
super().__init__(
name=f"{self.__class__.__module__}.{self.__class__.__qualname__}({fname})"
)
self.threadExc=None
self.start()
time.sleep(0.1)
threadHandle=ctypes.c_int()
threadHandle.value=ctypes.windll.kernel32.OpenThread(0x100000,False,self.ident)
msg=ctypes.wintypes.MSG()
while ctypes.windll.user32.MsgWaitForMultipleObjects(1,ctypes.byref(threadHandle),False,-1,255)==1:
while ctypes.windll.user32.PeekMessageW(ctypes.byref(msg),None,0,0,1):
ctypes.windll.user32.TranslateMessage(ctypes.byref(msg))
ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))
if self.threadExc:
raise self.threadExc
def run(self):
try:
self.func(*self.args,**self.kwargs)
except Exception as e:
self.threadExc=e
log.debugWarning("task had errors",exc_info=True)
class IndeterminateProgressDialog(wx.ProgressDialog):
def __init__(self, parent, title, message):
super(IndeterminateProgressDialog, self).__init__(title, message, parent=parent)
self._speechCounter = -1
self.timer = wx.PyTimer(self.Pulse)
self.timer.Start(1000)
self.Raise()
self.CentreOnScreen()
def Pulse(self):
super(IndeterminateProgressDialog, self).Pulse()
# We want progress to be spoken on the first pulse and every 10 pulses thereafter.
# Therefore, cycle from 0 to 9 inclusive.
self._speechCounter = (self._speechCounter + 1) % 10
pbConf = config.conf["presentation"]["progressBarUpdates"]
if pbConf["progressBarOutputMode"] == "off":
return
if not pbConf["reportBackgroundProgressBars"] and not self.IsActive():
return
if pbConf["progressBarOutputMode"] in ("beep", "both"):
tones.beep(440, 40)
if pbConf["progressBarOutputMode"] in ("speak", "both") and self._speechCounter == 0:
# Translators: Announced periodically to indicate progress for an indeterminate progress bar.
speech.speakMessage(_("Please wait"))
def IsActive(self):
#4714: In wxPython 3, ProgressDialog.IsActive always seems to return False.
return winUser.isDescendantWindow(winUser.getForegroundWindow(), self.Handle)
def done(self):
self.timer.Stop()
pbConf = config.conf["presentation"]["progressBarUpdates"]
if pbConf["progressBarOutputMode"] in ("beep", "both") and (pbConf["reportBackgroundProgressBars"] or self.IsActive()):
tones.beep(1760, 40)
self.Hide()
self.Destroy()
def shouldConfigProfileTriggersBeSuspended():
"""Determine whether configuration profile triggers should be suspended in relation to NVDA's GUI.
For NVDA configuration dialogs, the configuration should remain the same as it was before the GUI was popped up
so the user can change settings in the correct profile.
Top-level windows that require this behavior should have a C{shouldSuspendConfigProfileTriggers} attribute set to C{True}.
Because these dialogs are often opened via the NVDA menu, this applies to the NVDA menu as well.
"""
if winUser.getGUIThreadInfo(ctypes.windll.kernel32.GetCurrentThreadId()).flags & 0x00000010:
# The NVDA menu is active.
return True
for window in wx.GetTopLevelWindows():
if window.IsShown() and getattr(window, "shouldSuspendConfigProfileTriggers", False):
return True
return False
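# Illustrative sketch (not part of the original module): a top-level window opts in to
# suspending profile triggers simply by defining the attribute checked above, e.g.
#   class MySettingsDialog(wx.Dialog):
#       shouldSuspendConfigProfileTriggers = True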
class NonReEntrantTimer(wx.Timer):
"""
	Before WXPython 4, wx.Timer was non-re-entrant,
meaning that if code within its callback pumped messages (E.g. called wx.Yield) and this timer was ready to fire again,
the timer would not fire until the first callback had completed.
However, in WXPython 4, wx.Timer is now re-entrant.
Code in NVDA is not written to handle re-entrant timers, so this class provides a Timer with the old behaviour.
	This should be used in place of wx.Timer and wx.PyTimer where the callback will directly or indirectly call wx.Yield or somehow process the Windows window message queue.
For example, NVDA's core pump or other timers that run in NVDA's main thread.
Timers on braille display drivers for key detection don't need to use this as they only queue gestures rather than actually executing them.
"""
def __init__(self, run=None):
if run is not None:
self.run = run
self._inNotify = False
super(NonReEntrantTimer,self).__init__()
def run(self):
"""Subclasses can override or specify in constructor.
"""
raise NotImplementedError
def Notify(self):
if self._inNotify:
return
self._inNotify = True
try:
self.run()
finally:
self._inNotify = False
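# A minimal usage sketch (not part of the original module); `pumpFunction` is a
# hypothetical callback that pumps messages:
#   pumpTimer = NonReEntrantTimer(run=pumpFunction)
#   pumpTimer.Start(10)  # standard wx.Timer interval in milliseconds
# Notify calls that arrive while pumpFunction is still running are dropped.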
def _isDebug():
return config.conf["debugLog"]["gui"]
| 43.513158
| 351
| 0.74712
|
9da42c5d37e4e8b3e804f03b4aa0c97ec973371f
| 2,745
|
py
|
Python
|
PDFSentenceReader.py
|
Hicks48/pdf-word-analyzer
|
f903011070330e4bd999d0ecd25d95a887d92ec3
|
[
"Unlicense"
] | null | null | null |
PDFSentenceReader.py
|
Hicks48/pdf-word-analyzer
|
f903011070330e4bd999d0ecd25d95a887d92ec3
|
[
"Unlicense"
] | 14
|
2020-01-28T21:47:59.000Z
|
2022-03-11T23:23:50.000Z
|
PDFSentenceReader.py
|
Hicks48/pdf-word-analyzer
|
f903011070330e4bd999d0ecd25d95a887d92ec3
|
[
"Unlicense"
] | null | null | null |
from PDFTextReader import PDFTextReader
import re
class PDFSentenceReader:
def __init__(self):
self.temp_file = None
self.current_sentence = 0
self.current_character = 0
self.total_sentences = 0
def open(self, pdf_file_path, temp_file_path):
self._pdf_to_text(pdf_file_path, temp_file_path)
self.total_sentences = self._calculate_sentences_in_temp_file(temp_file_path)
self.temp_file = open(temp_file_path, 'r')
def next(self):
sentence = []
current_character = self.temp_file.read(1)
while current_character and current_character != '.':
self.current_character += 1
sentence.append(current_character)
current_character = self.temp_file.read(1)
self.current_character += 1
self.current_sentence += 1
if not current_character:
return None
return ''.join(sentence)
def get_position(self):
return { 'at-character': self.current_character, 'at-sentence': self.current_sentence, 'total-sentences': self.total_sentences }
def close(self):
self.temp_file.close()
def _calculate_sentences_in_temp_file(self, temp_file_path):
sentences = 0
temp_file = open(temp_file_path, 'r')
character = temp_file.read(1)
while character:
if character == '.':
sentences += 1
character = temp_file.read(1)
temp_file.close()
return sentences
def _pdf_to_text(self, pdf_file_path, temp_file_path):
        # Write all of the text, formatted, to a file
reader = PDFTextReader(pdf_file_path)
temp_file = open(temp_file_path, 'w')
num_pages = reader.get_num_pages()
for i in range(num_pages):
temp_file.write(self._get_page_text(i, reader))
temp_file.close()
def _get_page_text(self, page_index, reader):
#temp2 = open('./data/temp2.txt', 'a')
text = reader.extract_page_text(page_index)
#temp2.write(text)
#temp2.close()
return self._format_text(text)
def all_text_to_single_column(self, text):
pass
def _format_text(self, text):
# Turn text to lower case
formated = text.lower()
# Remove all commas
formated = formated.replace(',', '')
        # Combine sentences broken with linebreaks
formated = re.sub('\\s*\\-\\s*\n+\\s*', '', formated)
formated = re.sub('\\s*\n+\\-\\s*', '', formated)
# Replace all line breaks with spaces
formated = formated.replace('\n', ' ')
# Remove consecutive whitespaces and return
return re.sub(' +', ' ', formated)
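# Usage sketch (file paths are placeholders): iterate over the sentences of a PDF.
#
#   reader = PDFSentenceReader()
#   reader.open('input.pdf', 'temp.txt')
#   sentence = reader.next()
#   while sentence is not None:
#       print(sentence, reader.get_position())
#       sentence = reader.next()
#   reader.close()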
| 27.45
| 136
| 0.614208
|
ab3b6e3081870eca0a0daca8d185c4c9a6075984
| 13,060
|
py
|
Python
|
regression/trans_maml.py
|
MichalisLazarou/cavia
|
ce476f2e1bc7899ae309c9ae9f23d236eca12555
|
[
"MIT"
] | null | null | null |
regression/trans_maml.py
|
MichalisLazarou/cavia
|
ce476f2e1bc7899ae309c9ae9f23d236eca12555
|
[
"MIT"
] | null | null | null |
regression/trans_maml.py
|
MichalisLazarou/cavia
|
ce476f2e1bc7899ae309c9ae9f23d236eca12555
|
[
"MIT"
] | null | null | null |
"""
Regression experiment using MAML
"""
import copy
import os
import time
import numpy as np
import scipy.stats as st
import torch
import torch.nn.functional as F
import torch.optim as optim
import utils
import tasks_sine, tasks_celebA
import maml_model as fsn
from logger import Logger
from maml_model import MamlModel, FCNet
from cavia_model import CaviaModel
def run(args, log_interval=5000, rerun=False):
assert args.maml
# see if we already ran this experiment
code_root = os.path.dirname(os.path.realpath(__file__))
if not os.path.isdir('{}/{}_result_files/'.format(code_root, args.task)):
os.mkdir('{}/{}_result_files/'.format(code_root, args.task))
path = '{}/{}_result_files/'.format(code_root, args.task) + utils.get_path_from_args(args)
if os.path.exists(path + '.pkl') and not rerun:
return utils.load_obj(path)
start_time = time.time()
# correctly seed everything
utils.set_seed(args.seed)
# --- initialise everything ---
# get the task family
if args.task == 'sine':
task_family_train = tasks_sine.RegressionTasksSinusoidal()
task_family_valid = tasks_sine.RegressionTasksSinusoidal()
task_family_test = tasks_sine.RegressionTasksSinusoidal()
elif args.task == 'celeba':
task_family_train = tasks_celebA.CelebADataset('train', args.device)
task_family_valid = tasks_celebA.CelebADataset('valid', args.device)
task_family_test = tasks_celebA.CelebADataset('test', args.device)
else:
raise NotImplementedError
#initialize transformer
transformer = FCNet(task_family_train.num_inputs, 3, 128, 128).to(args.device)
# initialise network
model_inner = MamlModel(128,
task_family_train.num_outputs,
n_weights=args.num_hidden_layers,
num_context_params=args.num_context_params,
device=args.device
).to(args.device)
model_outer = copy.deepcopy(model_inner)
print("MAML: ", model_outer)
print("Transformer: ", transformer)
    # initialise meta-optimiser
meta_optimiser = optim.Adam(model_outer.weights + model_outer.biases + [model_outer.task_context],
args.lr_meta)
opt_transformer = torch.optim.Adam(transformer.parameters(), 0.01)
# initialise loggers
logger = Logger()
logger.best_valid_model = copy.deepcopy(model_outer)
for i_iter in range(args.n_iter):
#meta_train_error = 0.0
# copy weights of network
copy_weights = [w.clone() for w in model_outer.weights]
copy_biases = [b.clone() for b in model_outer.biases]
copy_context = model_outer.task_context.clone()
# get all shared parameters and initialise cumulative gradient
meta_gradient = [0 for _ in range(len(copy_weights + copy_biases) + 1)]
# sample tasks
target_functions = task_family_train.sample_tasks(args.tasks_per_metaupdate)
for t in range(args.tasks_per_metaupdate):
#gradient initialization for transformer
acc_grads = fsn.phi_gradients(transformer, args.device)
# reset network weights
model_inner.weights = [w.clone() for w in copy_weights]
model_inner.biases = [b.clone() for b in copy_biases]
model_inner.task_context = copy_context.clone()
# get data for current task
train_inputs = task_family_train.sample_inputs(args.k_meta_train, args.use_ordered_pixels).to(args.device)
# get test data
test_inputs = task_family_train.sample_inputs(args.k_meta_test, args.use_ordered_pixels).to(args.device)
transformed_train_inputs = transformer(train_inputs)#.to(args.device)
transformed_test_inputs = transformer(test_inputs)#.to(args.device)
# transformer task loss
# with torch.no_grad():
targets0 = target_functions[t](train_inputs)
L0 = F.mse_loss(model_inner(transformed_train_inputs), targets0)
targets1 = target_functions[t](test_inputs)
L1 = F.mse_loss(model_inner(transformed_test_inputs), targets1)
trans_loss = fsn.cosine_loss(L0, L1, model_inner, args.device)
#trans_loss = evaluation_error + trans_loss
for step in range(args.num_inner_updates):
# print("iteration:" , i_iter, "innerstep: ", step)
outputs = model_inner(transformed_train_inputs)
# make prediction using the current model
#outputs = model_inner(train_inputs)
# get targets
targets = target_functions[t](train_inputs)
# ------------ update on current task ------------
# compute loss for current task
loss_task = F.mse_loss(outputs, targets)
# compute the gradient wrt current model
params = [w for w in model_inner.weights] + [b for b in model_inner.biases] + [model_inner.task_context]
grads = torch.autograd.grad(loss_task, params, create_graph=True, retain_graph=True)
# make an update on the inner model using the current model (to build up computation graph)
for i in range(len(model_inner.weights)):
if not args.first_order:
model_inner.weights[i] = model_inner.weights[i] - args.lr_inner * grads[i].clamp_(-10, 10)
else:
model_inner.weights[i] = model_inner.weights[i] - args.lr_inner * grads[i].detach().clamp_(-10, 10)
for j in range(len(model_inner.biases)):
if not args.first_order:
model_inner.biases[j] = model_inner.biases[j] - args.lr_inner * grads[i + j + 1].clamp_(-10, 10)
else:
model_inner.biases[j] = model_inner.biases[j] - args.lr_inner * grads[i + j + 1].detach().clamp_(-10, 10)
if not args.first_order:
model_inner.task_context = model_inner.task_context - args.lr_inner * grads[i + j + 2].clamp_(-10, 10)
else:
model_inner.task_context = model_inner.task_context - args.lr_inner * grads[i + j + 2].detach().clamp_(-10, 10)
# ------------ compute meta-gradient on test loss of current task ------------
# get outputs after update
test_outputs = model_inner(transformed_test_inputs)
# get the correct targets
test_targets = target_functions[t](test_inputs)
# compute loss (will backprop through inner loop)
loss_meta = F.mse_loss(test_outputs, test_targets)
#meta_train_error += loss_meta.item()
# transformer gradients
trans_loss = loss_meta
grads_phi = list(torch.autograd.grad(trans_loss, transformer.parameters(), retain_graph=True, create_graph=True))
for p, l in zip(acc_grads, grads_phi):
l = l
p.data = torch.add(p, (1 / args.tasks_per_metaupdate), l.detach().clamp_(-10,10))
# compute gradient w.r.t. *outer model*
task_grads = torch.autograd.grad(loss_meta,
model_outer.weights + model_outer.biases + [model_outer.task_context])
for i in range(len(model_inner.weights + model_inner.biases) + 1):
meta_gradient[i] += task_grads[i].detach().clamp_(-10, 10)
# ------------ meta update ------------
opt_transformer.zero_grad()
meta_optimiser.zero_grad()
# parameter gradient attributes of transformer updated
for k, p in zip(transformer.parameters(), acc_grads):
k.grad = p
# print(meta_gradient)
# assign meta-gradient
for i in range(len(model_outer.weights)):
model_outer.weights[i].grad = meta_gradient[i] / args.tasks_per_metaupdate
meta_gradient[i] = 0
for j in range(len(model_outer.biases)):
model_outer.biases[j].grad = meta_gradient[i + j + 1] / args.tasks_per_metaupdate
meta_gradient[i + j + 1] = 0
model_outer.task_context.grad = meta_gradient[i + j + 2] / args.tasks_per_metaupdate
meta_gradient[i + j + 2] = 0
# do update step on outer model
meta_optimiser.step()
opt_transformer.step()
# ------------ logging ------------
if i_iter % log_interval == 0:# and i_iter > 0:
# evaluate on training set
loss_mean, loss_conf = eval(args, copy.copy(model_outer), task_family=task_family_train,
num_updates=args.num_inner_updates, transformer=transformer)
logger.train_loss.append(loss_mean)
logger.train_conf.append(loss_conf)
            # evaluate on validation set
loss_mean, loss_conf = eval(args, copy.copy(model_outer), task_family=task_family_valid,
num_updates=args.num_inner_updates, transformer=transformer)
logger.valid_loss.append(loss_mean)
logger.valid_conf.append(loss_conf)
            # evaluate on test set
loss_mean, loss_conf = eval(args, copy.copy(model_outer), task_family=task_family_test,
num_updates=args.num_inner_updates, transformer=transformer)
logger.test_loss.append(loss_mean)
logger.test_conf.append(loss_conf)
# save logging results
utils.save_obj(logger, path)
# save best model
if logger.valid_loss[-1] == np.min(logger.valid_loss):
print('saving best model at iter', i_iter)
logger.best_valid_model = copy.copy(model_outer)
# visualise results
if args.task == 'celeba':
task_family_train.visualise(task_family_train, task_family_test, copy.copy(logger.best_valid_model),
args, i_iter, transformer)
# print current results
logger.print_info(i_iter, start_time)
start_time = time.time()
return logger
def eval(args, model, task_family, num_updates, transformer, n_tasks=100, return_gradnorm=False):
# copy weights of network
copy_weights = [w.clone() for w in model.weights]
copy_biases = [b.clone() for b in model.biases]
copy_context = model.task_context.clone()
# get the task family (with infinite number of tasks)
input_range = task_family.get_input_range().to(args.device)
# logging
losses = []
gradnorms = []
# --- inner loop ---
for t in range(n_tasks):
# reset network weights
model.weights = [w.clone() for w in copy_weights]
model.biases = [b.clone() for b in copy_biases]
model.task_context = copy_context.clone()
# sample a task
target_function = task_family.sample_task()
# get data for current task
curr_inputs = task_family.sample_inputs(args.k_shot_eval, args.use_ordered_pixels).to(args.device)
curr_targets = target_function(curr_inputs)
# ------------ update on current task ------------
for _ in range(1, num_updates + 1):
curr_outputs = model(transformer(curr_inputs))
# compute loss for current task
task_loss = F.mse_loss(curr_outputs, curr_targets)
# update task parameters
params = [w for w in model.weights] + [b for b in model.biases] + [model.task_context]
grads = torch.autograd.grad(task_loss, params)
gradnorms.append(np.mean(np.array([g.norm().item() for g in grads])))
for i in range(len(model.weights)):
model.weights[i] = model.weights[i] - args.lr_inner * grads[i].detach().clamp_(-10, 10)
for j in range(len(model.biases)):
model.biases[j] = model.biases[j] - args.lr_inner * grads[i + j + 1].detach().clamp_(-10, 10)
model.task_context = model.task_context - args.lr_inner * grads[i + j + 2].detach().clamp_(-10, 10)
# ------------ logging ------------
# compute true loss on entire input range
losses.append(F.mse_loss(model(transformer(input_range)), target_function(input_range)).detach().item())
# reset network weights
model.weights = [w.clone() for w in copy_weights]
model.biases = [b.clone() for b in copy_biases]
model.task_context = copy_context.clone()
losses_mean = np.mean(losses)
losses_conf = st.t.interval(0.95, len(losses) - 1, loc=losses_mean, scale=st.sem(losses))
if not return_gradnorm:
return losses_mean, np.mean(np.abs(losses_conf - losses_mean))
else:
return losses_mean, np.mean(np.abs(losses_conf - losses_mean)), np.mean(gradnorms)
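# Entry-point sketch (hedged): `run` expects an argparse-style namespace with the
# fields referenced above (maml, task, n_iter, lr_inner, lr_meta, ...); the real
# namespace is built elsewhere in the project, and `get_args` below is hypothetical.
#
#   args = get_args()
#   logger = run(args, log_interval=5000, rerun=False)
#   print(logger.test_loss[-1], logger.test_conf[-1])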
| 41.72524
| 131
| 0.617841
|
51cc815099bd7827c399b6b1856eea5b7496dd90
| 10,098
|
py
|
Python
|
cogs/levelling.py
|
londarks/Athus-Discord-Bot
|
720a8c0a2280077712b8d954e7d48dec366953dd
|
[
"MIT"
] | 3
|
2020-04-04T11:41:19.000Z
|
2021-06-02T22:56:27.000Z
|
cogs/levelling.py
|
londarks/Athus-Discord-Bot
|
720a8c0a2280077712b8d954e7d48dec366953dd
|
[
"MIT"
] | null | null | null |
cogs/levelling.py
|
londarks/Athus-Discord-Bot
|
720a8c0a2280077712b8d954e7d48dec366953dd
|
[
"MIT"
] | null | null | null |
import sqlite3
import discord
from discord.ext import commands
import asyncio
import requests
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont, ImageOps
import time
class Levelling(commands.Cog):
def __init__(self,client):
self.client = client
@commands.Cog.listener()
async def on_message(self, message):
connection = sqlite3.connect('users.db')
result = connection.cursor()
if message.author.bot == True:
return
author = message.author.id
result.execute('SELECT discord_id From usuarios WHERE discord_id="%s"' % (author))
check_db=result.fetchone()
if check_db is None:
await self.update_data(author)
else:
result.execute('SELECT experience From usuarios WHERE discord_id="%s"' % (author))
experience_user=result.fetchone()
soma_ = (experience_user[0] + 5)
result.execute("""
UPDATE usuarios
SET experience = ?
WHERE discord_id = ?
""", (soma_,author))
connection.commit()
await self.level_up(author, message.channel.id, message.author.id,message.author.avatar_url)
async def update_data(self,user):
connection = sqlite3.connect('users.db')
cursor = connection.cursor()
_id = user
experience = 0
level = 1
Cash= 0
sapphire = 0
Reputation = 0
badges_1 = 'img/badges/novice_badges.png'
badges_2 = 'img/badges/None_badges.png'
badges_3 = 'img/badges/None_badges.png'
badges_4 = 'img/badges/None_badges.png'
badges_5 = 'img/badges/None_badges.png'
badges_6 = 'img/badges/None_badges.png'
Background = 'img/background/theme_0_backgroun.png'
        # register the user in the database
cursor.execute("INSERT INTO usuarios (discord_id,experience,level,cash,sapphire,reputation,badges_1,badges_2,badges_3,badges_4,badges_5,badges_6,background) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", (_id,experience,level,Cash,sapphire,Reputation,badges_1,badges_2,badges_3,badges_4,badges_5,badges_6,Background))
connection.commit()
item_id = 1
nome_item = 'novice'
tipo_item = 'badges'
cursor.execute("INSERT INTO inventario (id_usuario, item_id,nome_item,tipo_item) VALUES (?,?,?,?)", (user,item_id,nome_item,tipo_item))
connection.commit()
cursor.execute("INSERT INTO rank (user_id,level) VALUES (?,?)", (user,level))
connection.commit()
item_id = 2
nome_item = 'theme_0'
tipo_item = 'theme'
cursor.execute("INSERT INTO inventario (id_usuario, item_id,nome_item,tipo_item) VALUES (?,?,?,?)", (user,item_id,nome_item,tipo_item))
connection.commit()
async def level_up(self,user,canal, person,img):
list_lvl = [10,20,30,40,50,60,70,80,90,100]
connection = sqlite3.connect('users.db')
cursor = connection.cursor()
cursor.execute('SELECT experience,level From usuarios WHERE discord_id="%s"' % (user))
add_lvl = cursor.fetchone()
info_player = []
for add in add_lvl:
info_player.append(add)
experience = info_player[0]
lvl_start = info_player[1]
lvl_end = int(experience ** (1 / 4))
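        # Levelling formula: level = int(experience ** (1/4)), i.e. the XP threshold
        # grows quartically (roughly 16 XP for level 2, 625 XP for level 5).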
if lvl_start < lvl_end:
cursor.execute("""UPDATE usuarios SET level = ? WHERE discord_id = ?""",(lvl_end,user))
connection.commit()
channel = self.client.get_channel(canal)
#await self.level_card(img,lvl_end,canal)
            await channel.send(f'<@!{person}> Você subiu de Level.!!')
cursor.execute("""
UPDATE rank
SET level = ?
WHERE user_id = ?
""", (lvl_end,person))
connection.commit()
            # rank badges
            # generates a switch to map rank numbers to badge names
# switcher = {
# 10:"rank10",
# 9: "rank9",
# 8: "rank8",
# 7: "rank7",
# 6: "rank6",
# 5: "rank5",
# 4: "rank4",
# 3: "rank3",
# 2: "rank2",
# 1: "rank1"
# }
# g = 1
# #verifica se o usuario faz parte do top 10
# cursor.execute('SELECT * From rank ORDER BY level AND level DESC LIMIT 10')
# rank=cursor.fetchall()
# print(rank)
# for r in range(len(rank)):
# if user == rank[r][0]:
# rank_switcher = switcher.get(g)
# print(rank_switcher)
# print("entrei")
# #fazendo check-up na backpack
# cursor.execute('SELECT nome_item From inventario WHERE id_usuario="%s" AND tipo_item="badges"'%(user))
# comquistas = cursor.fetchall()
# print(comquistas)
# print(len(comquistas))
# for l in range(len(comquistas)):
# print(comquistas[l][0])
# if comquistas[l][0] == rank_switcher:
# print("entro")
# break
# else:
# item_id = 1
# nome_item = 'rank{}'.format(g)
# tipo_item = 'badges'
# cursor.execute("INSERT INTO inventario (id_usuario, item_id,nome_item,tipo_item) VALUES (?,?,?,?)", (user,item_id,nome_item,tipo_item))
# connection.commit()
# await channel.send("<@!{}>Você recebeu uma badges por estar: rank{}".format(user,g))
# return
# g += 1
"""Adiciona Badges por leveles ele faz umas rapida pesquisa
na sua bacpack e diz se você tem o item ou não caso tenha ele quebra o codigo com
um return para nao adicionar mais nada e essa função so será chamada outra vez caso
você pegue um level == da tabela list_lvl """
for i in range(len(list_lvl)):
if lvl_end == list_lvl[i]:
nome_item = 'lvl{}'.format(lvl_end)
cursor.execute('SELECT nome_item From inventario WHERE id_usuario="%s" AND tipo_item="badges"'% (user))
comquistas = cursor.fetchall()
for j in range(len(comquistas)):
if comquistas[j][0] == nome_item:
return
await channel.send("<@!{}> Você ganhou por passar para o nivel{}".format(person,lvl_end))
item_id = 1
tipo_item = 'badges'
cursor.execute("INSERT INTO inventario (id_usuario, item_id,nome_item,tipo_item) VALUES (?,?,?,?)", (user,item_id,nome_item,tipo_item))
connection.commit()
"""comando para adicionar badges no lvl30 não precisa de nenhum tipo de checagem pois você
so upa level 30 uma vez e esse codigo so será execultado somente uma vez"""
if lvl_end == 30:
await channel.send("<@!{}> Você ganhou uma badges por ser um veterano".format(person))
item_id = 1
nome_item = 'veterano'
tipo_item = 'badges'
cursor.execute("INSERT INTO inventario (id_usuario, item_id,nome_item,tipo_item) VALUES (?,?,?,?)", (user,item_id,nome_item,tipo_item))
connection.commit()
# async def level_card(self,img,lvl,canal):
# url =requests.get(img)
# avatar = Image.open(BytesIO(url.content))
# avatar = avatar.resize((130, 130));
# bigsize = (avatar.size[0] * 3, avatar.size[1] * 3)
# mask = Image.new('L', bigsize, 0)
# draw = ImageDraw.Draw(mask)
# draw.ellipse((0, 0) + bigsize, fill=255)
# mask = mask.resize(avatar.size, Image.ANTIALIAS)
# avatar.putalpha(mask)
# output = ImageOps.fit(avatar, mask.size, centering=(0.5, 0.5))
# output.putalpha(mask)
# output.save('img/levelup/avatar.png')
# #back_ground = Image.open('img/levelup/levelupcard.png')
# def create_gif(seta_1,seta_2,seta_3,seta_4,seta_5,seta_6,lvl):
# back_ground = Image.open('img/levelup/levelupcard.png')
# setinha = Image.open("img/levelup/setinha.png")
# nome_fonte = ImageFont.truetype('fonts/uni-sans.heavy-caps.otf',35)
# avatar = Image.open ('img/levelup/avatar.png')
# level = ImageDraw.Draw(back_ground)
# level.text(xy=(437,46), text="{}".format(lvl), fill=(13, 13, 13), font=nome_fonte)
# back_ground.paste(avatar, (22, 24), avatar)
# back_ground.paste(setinha, (seta_1, 129), setinha)
# back_ground.paste(setinha, (seta_2, 129), setinha)
# back_ground.paste(setinha, (seta_3, 129), setinha)
# back_ground.paste(setinha, (seta_4, 129), setinha)
# back_ground.paste(setinha, (seta_5, 129), setinha)
# back_ground.paste(setinha, (seta_6, 129), setinha)
# return back_ground
# frames = []
# #x, y = 246, 43
# seta_1 = 149
# seta_2 = 169
# seta_3 = 189
# seta_4 = 209
# seta_5 = 229
# seta_6 = 249
# for i in range(10):
# seta_1 += 20
# seta_2 += 20
# seta_3 += 20
# seta_4 += 20
# seta_5 += 20
# seta_6 += 20
# new_frame = create_gif(seta_1,seta_2,seta_3,seta_4,seta_5,seta_6,lvl)
# frames.append(new_frame)
# # Save into a GIF file that loops forever
# frames[0].save('img/levelup/levelupcard_1.gif', format='GIF', append_images=frames[1:], save_all=True, duration=100, loop=0, transparency=0)
# info_png = discord.File('img/levelup/levelupcard_1.gif')
# channel = self.client.get_channel(canal)
# await channel.send(file=info_png)
def setup(client):
client.add_cog(Levelling(client))
| 44.289474
| 315
| 0.561596
|
2783d608dc76fde706edd50b0b7aabce7fd79a5b
| 1,048
|
py
|
Python
|
bluebottle/assignments/migrations/0025_auto_20210301_1546.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 10
|
2015-05-28T18:26:40.000Z
|
2021-09-06T10:07:03.000Z
|
bluebottle/assignments/migrations/0025_auto_20210301_1546.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 762
|
2015-01-15T10:00:59.000Z
|
2022-03-31T15:35:14.000Z
|
bluebottle/assignments/migrations/0025_auto_20210301_1546.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 9
|
2015-02-20T13:19:30.000Z
|
2022-03-08T14:09:17.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-03-01 14:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('activities', '0038_auto_20210127_1358'),
('assignments', '0024_auto_20201112_1519'),
]
operations = [
migrations.RemoveField(
model_name='applicant',
name='contributor_ptr',
),
migrations.RemoveField(
model_name='applicant',
name='document',
),
migrations.RemoveField(
model_name='assignment',
name='activity_ptr',
),
migrations.RemoveField(
model_name='assignment',
name='expertise',
),
migrations.RemoveField(
model_name='assignment',
name='location',
),
migrations.DeleteModel(
name='Applicant',
),
migrations.DeleteModel(
name='Assignment',
),
]
| 24.372093
| 51
| 0.549618
|
616b77414b2897b364e82c1eb234ea26f616fa8a
| 634
|
py
|
Python
|
backend/manage.py
|
crowdbotics-apps/chatlata-29726
|
fcf979a4e31ed0178c5812b2bb074212fe84e072
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/chatlata-29726
|
fcf979a4e31ed0178c5812b2bb074212fe84e072
|
[
"FTL",
"AML",
"RSA-MD"
] | 14
|
2021-08-15T17:12:39.000Z
|
2022-01-23T17:02:34.000Z
|
backend/manage.py
|
crowdbotics-apps/chatlata-29726
|
fcf979a4e31ed0178c5812b2bb074212fe84e072
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chatlata_29726.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| 28.818182
| 78
| 0.68612
|
aa35d9e81b21c0295a186e99c055c0c3ac3855c9
| 2,891
|
py
|
Python
|
jax3d/projects/nesf/utils/file_utils.py
|
google-research/jax3d
|
f08ae174c80cd6c597f8fe5417168c82857eb236
|
[
"Apache-2.0"
] | 33
|
2021-12-15T12:59:28.000Z
|
2022-03-30T05:29:09.000Z
|
jax3d/projects/nesf/utils/file_utils.py
|
google-research/jax3d
|
f08ae174c80cd6c597f8fe5417168c82857eb236
|
[
"Apache-2.0"
] | 5
|
2021-12-15T15:15:03.000Z
|
2021-12-15T17:25:27.000Z
|
jax3d/projects/nesf/utils/file_utils.py
|
google-research/jax3d
|
f08ae174c80cd6c597f8fe5417168c82857eb236
|
[
"Apache-2.0"
] | 1
|
2022-01-29T18:30:02.000Z
|
2022-01-29T18:30:02.000Z
|
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File-related utils."""
import contextlib
import functools
import io
import typing
from typing import Iterator, Optional
from etils import epath
from jax3d.projects.nesf.utils import dataclass_utils
from jax3d.projects.nesf.utils.typing import PathLike # pylint: disable=g-multiple-import
# pathlib-like abstraction
Path = epath.Path
# Convert a resource path to write path.
# Used for automated scripts which write to
write_path = epath.to_write_path
class PathField(
dataclass_utils.DataclassField[Optional[PathLike], Optional[Path]]
):
"""Descriptor which converts `str` to pathlib-like Path.
Meant to be used in dataclasses (like `dataclasses.field`) to accept `str` as
input, and convert them to pathlib-like objects.
Example:
```python
@dataclasses.dataclass
class MyData:
root_dir: j3d.Path = j3d.utils.PathField()
my_data = MyData(root_dir='/path/to/file') # `str` as input
# `str` is automatically converted to pathlib-like abstraction:
my_data.root_dir.joinpath('file.txt').read_text()
```
"""
def _validate(self, value: Optional[PathLike]) -> Optional[Path]:
return None if value is None else Path(value)
@functools.lru_cache()
def j3d_dir() -> Path:
"""Root directory of `jax3d.projects.nesf/`."""
path = epath.resource_path(
'jax3d.projects.nesf')
return typing.cast(Path, path)
@functools.lru_cache()
def nf_dir() -> Path:
"""Root directory for `jax3d/nerfstatic/`."""
return j3d_dir() / 'nerfstatic'
@contextlib.contextmanager
def open_seekable(path: PathLike, mode: str) -> Iterator[io.BytesIO]:
"""Same as `path.open('rb')`, but write to intermediate buffer.
Rather than reading/writing directly to the file, file operations are applied
on an in-memory buffer. This require the full file to be loaded in-memory.
This is useful when file operation requires `f.seek()` which is not supported
on some file systems.
Args:
path: Path on which save the value
mode: `rb` or `wb`
Yields:
The file-like object on which write.
"""
path = Path(path)
if mode == 'rb':
buffer = io.BytesIO(path.read_bytes())
elif mode == 'wb':
buffer = io.BytesIO()
else:
    raise ValueError(f'Unsupported mode: {mode}')
yield buffer
if mode == 'wb':
path.write_bytes(buffer.getvalue())
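# Usage sketch (the path below is a placeholder): `np.load` needs `f.seek`, so it
# is routed through the in-memory buffer that `open_seekable` provides.
#
#   import numpy as np
#   with open_seekable('/tmp/arr.npy', 'rb') as f:
#     arr = np.load(f)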
| 28.067961
| 90
| 0.72155
|
bfaa96effc6e882262eb89c8b2428ae275124bd1
| 9,798
|
py
|
Python
|
mmdet/core/bbox/assigners/max_iou_assigner.py
|
Brym-Gyimah/mmdetection
|
d5d749afe57c77e2ec4500395faed3566fdfedae
|
[
"Apache-2.0"
] | 20,190
|
2018-09-10T01:11:53.000Z
|
2022-03-31T22:31:33.000Z
|
mmdet/core/bbox/assigners/max_iou_assigner.py
|
Joker-co/mmdet_pro
|
96abfd90cf0e38c5ce398795f949e9328eb85c1b
|
[
"Apache-2.0"
] | 6,736
|
2018-09-17T09:45:51.000Z
|
2022-03-31T22:54:10.000Z
|
mmdet/core/bbox/assigners/max_iou_assigner.py
|
Joker-co/mmdet_pro
|
96abfd90cf0e38c5ce398795f949e9328eb85c1b
|
[
"Apache-2.0"
] | 7,837
|
2018-09-11T02:58:23.000Z
|
2022-03-31T22:31:38.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class MaxIoUAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposals will be assigned with `-1`, or a semi-positive integer
indicating the ground truth index.
- -1: negative sample, no assigned gt
- semi-positive integer: positive sample, index (0-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
`gt_bboxes_ignore` is specified). Negative values mean not
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
match_low_quality (bool): Whether to allow low quality matches. This is
usually allowed for RPN and single stage detectors, but not allowed
in the second stage. Details are demonstrated in Step 4.
gpu_assign_thr (int): The upper bound of the number of GT for GPU
assign. When the number of gt is above this threshold, will assign
on CPU device. Negative values mean not assign on CPU.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
ignore_iof_thr=-1,
ignore_wrt_candidates=True,
match_low_quality=True,
gpu_assign_thr=-1,
iou_calculator=dict(type='BboxOverlaps2D')):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.ignore_iof_thr = ignore_iof_thr
self.ignore_wrt_candidates = ignore_wrt_candidates
self.gpu_assign_thr = gpu_assign_thr
self.match_low_quality = match_low_quality
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign gt to bboxes.
This method assign a gt bbox to every bbox (proposal/anchor), each bbox
will be assigned with -1, or a semi-positive number. -1 means negative
sample, semi-positive number is the index (0-based) of assigned gt.
The assignment is done in following steps, the order matters.
1. assign every bbox to the background
2. assign proposals whose iou with all gts < neg_iou_thr to 0
3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
assign it to that bbox
4. for each gt bbox, assign its nearest proposals (may be more than
one) to itself
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
Example:
>>> self = MaxIoUAssigner(0.5, 0.5)
>>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
>>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
>>> assign_result = self.assign(bboxes, gt_bboxes)
>>> expected_gt_inds = torch.LongTensor([1, 0])
>>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
"""
assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
gt_bboxes.shape[0] > self.gpu_assign_thr) else False
# compute overlap and assign gt on CPU when number of GT is large
if assign_on_cpu:
device = bboxes.device
bboxes = bboxes.cpu()
gt_bboxes = gt_bboxes.cpu()
if gt_bboxes_ignore is not None:
gt_bboxes_ignore = gt_bboxes_ignore.cpu()
if gt_labels is not None:
gt_labels = gt_labels.cpu()
overlaps = self.iou_calculator(gt_bboxes, bboxes)
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = self.iou_calculator(
bboxes, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = self.iou_calculator(
gt_bboxes_ignore, bboxes, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
if assign_on_cpu:
assign_result.gt_inds = assign_result.gt_inds.to(device)
assign_result.max_overlaps = assign_result.max_overlaps.to(device)
if assign_result.labels is not None:
assign_result.labels = assign_result.labels.to(device)
return assign_result
def assign_wrt_overlaps(self, overlaps, gt_labels=None):
"""Assign w.r.t. the overlaps of bboxes with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
# 2. assign negative: below
# the negative inds are set to be 0
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps < self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, tuple):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
& (max_overlaps < self.neg_iou_thr[1])] = 0
# 3. assign positive: above positive IoU threshold
pos_inds = max_overlaps >= self.pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
# Low-quality matching will overwrite the assigned_gt_inds assigned
# in Step 3. Thus, the assigned gt might not be the best one for
# prediction.
# For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
# bbox 1 will be assigned as the best target for bbox A in step 3.
            # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
            # assigned_gt_inds will be overwritten to be gt bbox 2.
# This might be the reason that it is not used in ROI Heads.
for i in range(num_gts):
if gt_max_overlaps[i] >= self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
assigned_gt_inds[max_iou_inds] = i + 1
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
| 45.785047
| 79
| 0.602164
|
56ecd305b4b5bf5249c4d3ed049b0e94d29dbb81
| 1,313
|
py
|
Python
|
metrics.py
|
thelawofueki/viptest
|
5827b1cba812b06ffa5fd1341162ad4547b6c3cc
|
[
"MIT"
] | null | null | null |
metrics.py
|
thelawofueki/viptest
|
5827b1cba812b06ffa5fd1341162ad4547b6c3cc
|
[
"MIT"
] | null | null | null |
metrics.py
|
thelawofueki/viptest
|
5827b1cba812b06ffa5fd1341162ad4547b6c3cc
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from viptela import Viptela
MAPPING = {
"rx_pkts": "network-in-packets.%s.cntr",
"tx_pkts": "network-out-packets.%s.cntr",
}
def get_metrics(event, context):
VIPTELA_USERNAME = event["credentials"]["username"]
VIPTELA_PSWD = event["credentials"]["password"]
VIPTELA_SERVER = "54.251.162.192"
vip_cli = Viptela(VIPTELA_USERNAME, VIPTELA_PSWD, VIPTELA_SERVER)
data = vip_cli.get_device_metrics(event["resource"]["provider_id"])
print "Fetched %d data samples from Viptela" % len(data)
if data:
metrics = list(format_metrics(data))
return {
"last_update": metrics[-1]["time"],
"metrics": metrics,
}
return {
"metrics": []
}
def format_metrics(data):
metrics = []
for sample in data:
metrics += format_sample(sample)
return metrics
def format_sample(sample):
metrics = []
ts = datetime.fromtimestamp(sample.get("lastupdated") / 1e3)
for metric_name, cmp_metric_name in MAPPING.iteritems():
metrics.append({
"metric": cmp_metric_name % sample.get("dest-ip"),
"value": sample.get(metric_name),
"unit": "packets/s",
"time": ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
})
return metrics
| 26.795918
| 71
| 0.616146
|
c2d6633686af99a03c3307ae6e81be8428de5d12
| 3,279
|
py
|
Python
|
examples/cycle_gan.py
|
bobelly/torchsupport
|
5aa0a04f20c193ec99310f5d6a3375d2e95e740d
|
[
"MIT"
] | 18
|
2019-05-02T16:32:15.000Z
|
2021-04-16T09:33:54.000Z
|
examples/cycle_gan.py
|
bobelly/torchsupport
|
5aa0a04f20c193ec99310f5d6a3375d2e95e740d
|
[
"MIT"
] | 5
|
2019-10-14T13:46:49.000Z
|
2021-06-08T11:48:34.000Z
|
examples/cycle_gan.py
|
bobelly/torchsupport
|
5aa0a04f20c193ec99310f5d6a3375d2e95e740d
|
[
"MIT"
] | 12
|
2019-05-12T21:34:24.000Z
|
2021-07-15T14:14:16.000Z
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder
from torchsupport.modules.basic import MLP
from torchsupport.training.translation import CycleGANTraining, PairedGANTraining
class MiniEdges2Shoes(Dataset):
def __init__(self, path, mode=0):
self.data = ImageFolder(path)
self.indices = [
idx
for idx, sample in enumerate(self.data.samples)
if sample[1] == mode
]
def __getitem__(self, index):
position = self.indices[index]
img, _ = self.data[position]
img = torch.tensor(np.array(img)).permute(2, 0, 1).to(torch.float) / 255
edge = img[:, :, :256].unsqueeze(0)
shoe = img[:, :, 256:].unsqueeze(0)
edge = func.adaptive_max_pool2d(1 - edge, (28, 28))
shoe = func.adaptive_avg_pool2d(shoe, (28, 28))
return edge[0], shoe[0]
def __len__(self):
return len(self.indices)
class UnpairedEdge2Shoes(MiniEdges2Shoes):
def __init__(self, path, mode=0):
super().__init__(path, mode=mode)
def __getitem__(self, index):
edge_index = torch.randint(0, len(self) - 1, (1,))[0]
shoe_index = torch.randint(0, len(self) - 1, (1,))[0]
edge, _ = super().__getitem__(edge_index)
_, shoe = super().__getitem__(shoe_index)
return edge, shoe
class Generator(nn.Module):
def __init__(self, z=32):
super().__init__()
self.z = z
self.generate = MLP(3 * 28 * 28 + z, 3 * 28 * 28, depth=4)
def sample(self, batch_size):
return torch.randn(batch_size, self.z)
def forward(self, sample, condition):
condition = condition.view(condition.size(0), -1)
inputs = torch.cat((condition, sample), dim=1)
result = torch.sigmoid(self.generate(inputs))
result = result.view(result.size(0), 3, 28, 28)
return result
class Discriminator(nn.Module):
def __init__(self):
super().__init__()
self.preprocess = nn.Conv2d(3, 32, 1)
self.blocks = nn.ModuleList([
nn.Conv2d(32, 32, 3, padding=1)
for idx in range(4)
])
self.postprocess = nn.Linear(32, 1)
def forward(self, data):
out = func.relu(self.preprocess(data))
for block in self.blocks:
out = func.relu(block(out))
out = func.max_pool2d(out, 2)
out = func.adaptive_avg_pool2d(out, 1).view(out.size(0), -1)
return self.postprocess(out)
class E2SGANTraining(CycleGANTraining):
def each_generate(self, data, translated, cycled):
data_points = torch.cat([x for x in data[0][:5]], dim=2).detach()
translated_points = torch.cat([x for x in translated[0][:5]], dim=2).detach()
cycled_points = torch.cat([x for x in cycled[0][:5]], dim=2).detach()
self.writer.add_image("data", data_points, self.step_id)
self.writer.add_image("translated", translated_points, self.step_id)
self.writer.add_image("cycled", cycled_points, self.step_id)
if __name__ == "__main__":
data = UnpairedEdge2Shoes("~/Downloads/edges2shoes/")
generators = (Generator(), Generator())
discriminators = (Discriminator(), Discriminator())
training = E2SGANTraining(
generators, discriminators, data,
network_name="e2s-cycle",
device="cpu",
batch_size=64,
max_epochs=1000,
verbose=True
)
training.train()
| 30.64486
| 81
| 0.673376
|
8647131633b9d7a5a6638341c24b844904b8df81
| 731
|
py
|
Python
|
blog/migrations/0001_initial.py
|
naritotakizawa/django-notify-sample
|
c9673e98503b822adcdae738accbb30182ba6817
|
[
"MIT"
] | 2
|
2019-07-29T00:24:31.000Z
|
2019-08-04T13:28:13.000Z
|
blog/migrations/0001_initial.py
|
naritotakizawa/django-notify-sample
|
c9673e98503b822adcdae738accbb30182ba6817
|
[
"MIT"
] | null | null | null |
blog/migrations/0001_initial.py
|
naritotakizawa/django-notify-sample
|
c9673e98503b822adcdae738accbb30182ba6817
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-07-27 01:20
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='タイトル')),
('text', models.TextField(verbose_name='本文')),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
],
),
]
| 29.24
| 115
| 0.581395
|
b7c26aa36485b8d6f8ad21f57ef4fa601473a6ec
| 2,149
|
py
|
Python
|
testonly/mpush2/m_stat.py
|
y12studio/pi
|
c815b0b2a2421036ec99e085ffa92b1e3b5145f9
|
[
"Apache-2.0"
] | 1
|
2016-10-28T07:30:34.000Z
|
2016-10-28T07:30:34.000Z
|
testonly/mpush2/m_stat.py
|
y12studio/pi
|
c815b0b2a2421036ec99e085ffa92b1e3b5145f9
|
[
"Apache-2.0"
] | null | null | null |
testonly/mpush2/m_stat.py
|
y12studio/pi
|
c815b0b2a2421036ec99e085ffa92b1e3b5145f9
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2013 Y12Studio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import m_dys388icon as ledicon
import m_dys388dbp as ledDys388
import numpy as np
import collections, array
from scipy import stats
class StatSizeDiff:
def __init__(self, queueSize):
self.size = queueSize
self.xi = np.arange(0,queueSize)
self.sizeQueue = collections.deque(maxlen=queueSize)
self.diffQueue = collections.deque(maxlen=queueSize)
self.stdQueue = collections.deque(maxlen=queueSize)
self.rQueue = collections.deque(maxlen=queueSize)
for i in range(queueSize):
self.sizeQueue.append(0)
self.diffQueue.append(0)
self.stdQueue.append(0)
self.rQueue.append(0)
def getNpStd(self,s):
self._addSize(s)
stddev = int(np.std(self.diffQueue))
self.stdQueue.append(stddev)
#print 'STD_DEV=',['%.2f'%i for i in self.stdQueue]
return stddev
def _addSize(self,s):
self.sizeQueue.append(s)
#print "SIZE=",self.sizeQueue
diff = abs(self.sizeQueue[self.size-1]-self.sizeQueue[self.size-2])
self.diffQueue.append(diff)
#print "DIFF=",self.diffQueue
def getScipiLinregress(self,s):
self._addSize(s)
slope, intercept, r_value, p_value, std_err = stats.linregress(self.xi,self.diffQueue)
self.stdQueue.append(std_err)
self.rQueue.append(r_value)
#print 'STDERR=',['%.2f'%i for i in self.stdQueue]
#print 'R=',['%.2f'%i for i in self.rQueue]
return std_err
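# Usage sketch (the sample sizes are made up): feed successive payload sizes and
# read back the rolling standard deviation of their differences.
#
# stat = StatSizeDiff(10)
# for size in (120, 118, 340, 122, 119):
#     stddev = stat.getNpStd(size)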
| 35.816667
| 94
| 0.660772
|
7b1fa5259ebb0004788694e7128e15a0eb8067d0
| 1,589
|
py
|
Python
|
src/grammar_tester/commands.py
|
akolonin/language-learning
|
022c34a3066aa97ea0d007419e026247a4f78dd5
|
[
"MIT"
] | null | null | null |
src/grammar_tester/commands.py
|
akolonin/language-learning
|
022c34a3066aa97ea0d007419e026247a4f78dd5
|
[
"MIT"
] | null | null | null |
src/grammar_tester/commands.py
|
akolonin/language-learning
|
022c34a3066aa97ea0d007419e026247a4f78dd5
|
[
"MIT"
] | null | null | null |
from typing import List
from .optconst import *
def get_sed_regex(options: int) -> str:
    # If BIT_ULL_IN is set, sed filters out link lines, leaving only sentences, and removes square brackets around tokens if any.
if options & BIT_ULL_IN:
return r'/\(^[0-9].*$\)\|\(^$\)/d;s/\[\([a-z0-9A-Z.,:\@"?!*~()\/\#\$&;^%_`\0xe2\x27\xE2\x80\x94©®°•…≤±×΅⁻¹²³€αβπγδμεθ«»=+-]*\)\]/\1/g;s/.*/\L\0/g' \
if options & BIT_INPUT_TO_LCASE \
else r'/\(^[0-9].*$\)\|\(^$\)/d;s/\[\([a-z0-9A-Z.,:\@"?!*~()\/\#\$&;^%_`\0xe2\x27\xE2\x80\x94©®°•…≤±×΅⁻¹²³€αβπγδμεθ«»=+-]*\)\]/\1/g'
# Otherwise sed removes only empty lines.
else:
return r"/^$/d;s/.*/\L\0/g" if options & BIT_INPUT_TO_LCASE else r"/^$/d"
def get_linkparser_command(options: int, dict_path: str, limit: int, timeout: int, verb: int=1) -> List[str]:
verbosity = "-verbosity={}".format(verb)
# Make command option list depending on the output format specified.
if not (options & BIT_OUTPUT) or (options & BIT_OUTPUT_POSTSCRIPT):
lgp_cmd = ["link-parser", dict_path, "-echo=1", "-postscript=1", "-graphics=0", verbosity,
"-limit=" + str(limit), "-timeout=" + str(timeout)]
elif options & BIT_OUTPUT_CONST_TREE:
lgp_cmd = ["link-parser", dict_path, "-echo=1", "-constituents=1", "-graphics=0", verbosity,
"-limit=" + str(limit), "-timeout=" + str(timeout)]
else:
lgp_cmd = ["link-parser", dict_path, "-echo=1", "-graphics=1", verbosity,
"-limit=" + str(limit), "-timeout=" + str(timeout)]
return lgp_cmd
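# Usage sketch (the dictionary path is a placeholder; the BIT_* flags come from
# .optconst): the returned list is handed to subprocess by the caller.
#
#   sed_expr = get_sed_regex(BIT_ULL_IN)
#   lgp_cmd = get_linkparser_command(BIT_ULL_IN, '/path/to/dict', limit=100, timeout=30)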
| 46.735294
| 156
| 0.569541
|
dadd0fc5017cd8ecb24d18485dde6d10801a8fd3
| 330
|
py
|
Python
|
coding_interviews/leetcode/easy/find_the_highest_altitude/find_the_highest_altitude.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 205
|
2018-12-01T17:49:49.000Z
|
2021-12-22T07:02:27.000Z
|
coding_interviews/leetcode/easy/find_the_highest_altitude/find_the_highest_altitude.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 2
|
2020-01-01T16:34:29.000Z
|
2020-04-26T19:11:13.000Z
|
coding_interviews/leetcode/easy/find_the_highest_altitude/find_the_highest_altitude.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 50
|
2018-11-28T20:51:36.000Z
|
2021-11-29T04:08:25.000Z
|
# https://leetcode.com/problems/find-the-highest-altitude
def largest_altitude(gain):
highest_altitude, current_altitude = 0, 0
for net_gain in gain:
current_altitude += net_gain
if current_altitude > highest_altitude:
highest_altitude = current_altitude
return highest_altitude
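# Example: gain = [-5, 1, 5, 0, -7] gives running altitudes -5, -4, 1, 1, -6,
# so largest_altitude(gain) returns 1 (the sample case from the problem statement).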
| 30
| 57
| 0.70303
|
1533d277a3e94c8299bac6573e487ee30b5f2ac8
| 7,157
|
py
|
Python
|
ml_analysis/SVM.py
|
damien-marion/analysis
|
110fed55d24802e85ff3e8fc224376481a822649
|
[
"MIT"
] | null | null | null |
ml_analysis/SVM.py
|
damien-marion/analysis
|
110fed55d24802e85ff3e8fc224376481a822649
|
[
"MIT"
] | null | null | null |
ml_analysis/SVM.py
|
damien-marion/analysis
|
110fed55d24802e85ff3e8fc224376481a822649
|
[
"MIT"
] | null | null | null |
"""
File: SVM.py
Project: analysis
Last Modified: 2022-2-7
Created Date: 2022-2-7
Copyright (c) 2021
Author: AHMA project (Univ Rennes, CNRS, Inria, IRISA)
"""
################################################################################
import sys, os, glob
import logging
import numpy as np
import joblib
import argparse
import time
from tqdm import tqdm
from datetime import datetime
from sklearn.metrics import classification_report
sys.path.append(os.path.join (os.path.dirname (__file__), "../pre-processings/"))
from nicv import compute_nicv
from list_manipulation import get_tag
from evaluation import mean_by_tags, load_traces
################################################################################
def evaluate (path_lists, log_file, model_lda, model_svm, mean_sizes, nb_of_bd,
path_acc, time_limit):
################################################################################
# evaluate
# compute the LDA + learning algorithm SVM
#
# input:
# + path_lists: path of the lists
# + log_file: where the results are saved
# + model_{lda, svm}: previously saved {LDA, SVM} models
# + mean_sizes: numbers of mean sizes to try.
# + path_acc: directory where the accumulators are stored
# + time_limit: percentage of the trace to keep (from the beginning)
# + nb_of_bd: number of frequency bands to conserve
#
################################################################################
## logging exp in file
today = datetime.now ()
d1 = today.strftime ("%d.%m.%Y - %H:%M:%S")
file_log = open (log_file, 'a')
file_log.write ('-'*80 + '\n')
file_log.write (d1 + '\n')
file_log.write ('path_lists: %s\n'%str (path_lists)\
+ 'log_file: %s\n'%str (log_file)\
+ 'model_lda: %s\n'%str (model_lda)\
+ 'model_svm: %s\n'%str (model_svm)\
+ 'model_nb: None\n'\
+ 'means: %s\n'%str (mean_sizes)\
+ 'nb_of_bd: %s\n'%str (nb_of_bd)\
+ 'path_acc: %s\n'%str (path_acc)\
+ 'time_limit: %s\n'%str (time_limit)\
+ 'metric: max_nicv\n')
file_log.write ('-'*80 + '\n')
file_log.close ()
## load lists
[_, _, x_test_filelist, _, _, y_test] \
= np.load (path_lists, allow_pickle = True)
    ## load LDA (needed for the mean computation)
clf_known = False
if (model_lda.split ('.')[-1] == 'jl'): # if the model is given
## get indexes
_, _, nicv, bandwidth = compute_nicv (path_lists, path_acc, None,\
bandwidth_nb = nb_of_bd,
time_limit = time_limit)
clf = joblib.load (model_lda)
        ## testing
testing_traces = load_traces (x_test_filelist, bandwidth, time_limit)
## LDA
t0 = time.time ()
X = clf.transform (testing_traces.T)
clf_known = True
else: # meaning it is the transformed traces (numpy format)
## testing
t0 = time.time ()
X = np.load (model_lda, allow_pickle = True)
## testing
testing_labels = y_test
## get tags to be able to mean
testing_tags = np.array ([get_tag (f) for f in x_test_filelist])
## load SVM
svm = joblib.load (model_svm)
file_log = open (log_file, 'a')
file_log.write ('transform (size: %s): %s seconds\n'%(str(X.shape), str (time.time () - t0)))
file_log.close ()
## NB
t0 = time.time ()
predicted_labels = svm.predict (X)
file_log = open (log_file, 'a')
file_log.write ('Test SVM (size: %s) [%s seconds]:\n'%(str (X.shape), str (time.time () - t0)))
# file_log.write (f'f1: {f1:0.4f} - r_score: {r_score:0.4f} - average_precision: {average_precision:0.4f}')
file_log.write (f'{classification_report (list (testing_labels), predicted_labels, digits = 4, zero_division = 0)}')
file_log.close ()
## compute for all means size
if (clf_known):
for mean_size in mean_sizes:
file_log = open (log_file, 'a')
file_log.write ('compute with %s per mean\n'%mean_size)
file_log.close ()
X, y = mean_by_tags (testing_traces, testing_tags,
np.array (testing_labels), x_test_filelist, mean_size)
# SVM
t0 = time.time ()
predicted_labels = svm.predict (clf.transform (X.T))
file_log = open (log_file, 'a')
file_log.write (f'SVM - mean {mean_size}:\n {classification_report (list (y), predicted_labels, digits = 4, zero_division = 0)}')
file_log.close ()
################################################################################
if __name__ == '__main__':
################################################################################
parser = argparse.ArgumentParser()
parser.add_argument ('--lists', action = 'store',
type = str, dest = 'path_lists',
help = 'Absolute path to a file containing the lists')
parser.add_argument ('--model_lda', action = 'store', type=str,
dest = 'model_lda',
help = 'Absolute path to the file where the LDA model has been previously saved')
parser.add_argument ('--model_svm', action = 'store', type=str,
dest = 'model_svm',
help = 'Absolute path to the file where the SVM model has been previously saved')
parser.add_argument("--mean_size", default = [2,3,4,5,6,7,8,9,10],
action = 'append', dest = 'mean_sizes',
help = 'Size of each means')
parser.add_argument('--log-file', default = 'log-evaluation.txt',
dest = 'log_file',
help = 'Absolute path to the file to save results')
parser.add_argument ('--time_limit', action ='store', type = float, default = 0.5,
dest = 'time_limit',
                         help = 'percentage of the trace to conserve (from the beginning)')
parser.add_argument ('--acc', action='store', type=str,
dest='path_acc',
help='Absolute path of the accumulators directory')
args, unknown = parser.parse_known_args ()
assert len (unknown) == 0, f"[ERROR] Unknown arguments:\n{unknown}\n"
nb_of_bandwidth_lda = int (args.model_lda.split ('/')[-1].split ('_')[-1].split ('.')[0][:-2])
nb_of_bandwidth_svm = int (args.model_svm.split ('/')[-1].split ('_')[-1].split ('.')[0][:-2])
assert nb_of_bandwidth_lda == nb_of_bandwidth_svm,\
f"[ERROR] bad selected models, number of bandwidth must be the same\n"
evaluate (args.path_lists,
args.log_file,
args.model_lda,
args.model_svm,
args.mean_sizes,
nb_of_bandwidth_lda,
args.path_acc,
args.time_limit)
| 38.069149
| 141
| 0.523683
|
cdc4f12df7fb563cdb6dbc9505ad04e30b02ad96
| 3,450
|
py
|
Python
|
solutions/year_2020/day_7/handy_haversacks.py
|
svandermeulen/advent_of_code
|
99fe936a564f8ee66f8f28aaac4e36e013ebc28d
|
[
"MIT"
] | null | null | null |
solutions/year_2020/day_7/handy_haversacks.py
|
svandermeulen/advent_of_code
|
99fe936a564f8ee66f8f28aaac4e36e013ebc28d
|
[
"MIT"
] | null | null | null |
solutions/year_2020/day_7/handy_haversacks.py
|
svandermeulen/advent_of_code
|
99fe936a564f8ee66f8f28aaac4e36e013ebc28d
|
[
"MIT"
] | null | null | null |
"""
-*- coding: utf-8 -*-
Written by: sme30393
Date: 07/12/2020
"""
import os
import re
from typing import List
from solutions.config import Config
from solutions.year_2020.utils.file_manager import read_txt_file
def parse_rules(data: List[str]) -> dict:
data = [re.split("contain|, ", rule) for rule in data]
data = [[item.strip(" |.") for item in rule] for rule in data]
data_dict = {rule[0]: rule[1:] for rule in data}
return data_dict
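# Example (AoC 2020 day 7 rule format assumed by parse_rules):
#   "light red bags contain 1 bright white bag, 2 muted yellow bags."
# parses to {'light red bags': ['1 bright white bag', '2 muted yellow bags']}.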
def parse_bag(bag: str) -> (str, int):
    bag_count, = re.findall(r"^\d+", bag)
    bag_count = int(bag_count)
    bag_renamed = re.sub(r'^\d+\s+', "", bag)
if bag_count < 2:
bag_renamed += "s"
return bag_renamed, bag_count
def find_gold_carrying_bags(data: dict, bag_type: str = "shiny gold bag", bags_gold: list = None) -> list:
bags_gold = bags_gold if bags_gold is not None else []
bags_containing_type = [key for key, lst in data.items() if any([bag_type in val for val in lst])]
bags_containing_type = [bag for bag in bags_containing_type if bag not in bags_gold]
if bags_containing_type:
bags_gold.extend(bags_containing_type)
for bag in bags_containing_type:
bags_gold = find_gold_carrying_bags(data=data, bag_type=bag.rstrip("s"), bags_gold=bags_gold)
return bags_gold
def unpack(rules: dict, inner_bags: str, bags_unpacked: list = None) -> list:
bags_unpacked = bags_unpacked if bags_unpacked is not None else []
for bag in inner_bags:
if bag == "no other bags":
return bags_unpacked
bag, bag_count = parse_bag(bag=bag)
if rules[bag] == ["no other bags"]:
bags_unpacked.extend(bag_count * [bag])
continue
if bag in rules:
for _ in range(bag_count):
bags_unpacked.append(bag)
bags_unpacked = unpack(rules=rules, inner_bags=rules[bag], bags_unpacked=bags_unpacked)
return bags_unpacked
def main():
config = Config(day=7)
# PART ONE
path_file = os.path.join(config.path_data, "bag_color_rules_test.txt")
data_test = read_txt_file(path_file=path_file)
data_test_parsed = parse_rules(data=data_test)
bags_carrying_shiny_gold = find_gold_carrying_bags(data=data_test_parsed, bag_type="shiny gold bag")
n_bags_carrying_shiny_gold = len(bags_carrying_shiny_gold)
assert 4 == n_bags_carrying_shiny_gold
path_file = os.path.join(config.path_data, "bag_color_rules.txt")
data = read_txt_file(path_file=path_file)
data_parsed = parse_rules(data=data)
bags_carrying_shiny_gold = find_gold_carrying_bags(data=data_parsed)
assert 128 == len(bags_carrying_shiny_gold)
# PART TWO
bags_unpacked = unpack(rules=data_test_parsed, inner_bags=data_test_parsed["shiny gold bags"])
assert 32 == len(bags_unpacked)
path_file = os.path.join(config.path_data, "bag_color_rules_test_part_two.txt")
data_test_two = read_txt_file(path_file=path_file)
data_test_two_parsed = parse_rules(data=data_test_two)
bags_unpacked = unpack(rules=data_test_two_parsed, inner_bags=data_test_two_parsed["shiny gold bags"])
assert 126 == len(bags_unpacked)
bags_unpacked = unpack(rules=data_parsed, inner_bags=data_parsed["shiny gold bags"])
print(f"The total number of bags within a shiny gold bag equals: {len(bags_unpacked)}")
assert 20189 == len(bags_unpacked)
return True
if __name__ == "__main__":
main()
| 33.823529
| 106
| 0.70029
|
62a20df2712c6e0e8d6457dd147f60ab64a91712
| 273
|
py
|
Python
|
lcc/compiler.py
|
ronyhe/lcc
|
f352a8c973352e7ed49efdd11099182f21fdd06b
|
[
"MIT"
] | 6
|
2017-03-05T09:26:06.000Z
|
2017-11-16T06:46:58.000Z
|
lcc/compiler.py
|
ronyhe/lcc
|
f352a8c973352e7ed49efdd11099182f21fdd06b
|
[
"MIT"
] | null | null | null |
lcc/compiler.py
|
ronyhe/lcc
|
f352a8c973352e7ed49efdd11099182f21fdd06b
|
[
"MIT"
] | null | null | null |
from lcc import lexical, syntactical
def run_compile(source_text):
lexer = lexical.Lexer(source_text)
parser = syntactical.Parser(lexer)
program = parser.parse_program()
program.validate_free_variables()
python = program.to_python()
return python
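# Usage sketch: `source_text` is a program in the language accepted by lcc's lexer
# and parser; the return value is the generated Python code as a string.
#
#   python_code = run_compile(source_text)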
| 24.818182
| 38
| 0.739927
|
093822f4103e60ffee1963cd75831b7de4553361
| 23,353
|
py
|
Python
|
tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_util.py
|
bourov/probability
|
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
|
[
"Apache-2.0"
] | 2
|
2020-12-17T20:43:24.000Z
|
2021-06-11T22:09:16.000Z
|
tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_util.py
|
bourov/probability
|
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_util.py
|
bourov/probability
|
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
|
[
"Apache-2.0"
] | 1
|
2020-12-19T13:05:15.000Z
|
2020-12-19T13:05:15.000Z
|
# Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.
# DO NOT MODIFY DIRECTLY.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# pylint: disable=line-too-long
# pylint: disable=reimported
# pylint: disable=g-bool-id-comparison
# pylint: disable=g-statement-before-imports
# pylint: disable=bad-continuation
# pylint: disable=useless-import-alias
# pylint: disable=property-with-parameters
# pylint: disable=trailing-whitespace
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Internal utilities for `LinearOperator` classes."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import dtype as dtypes
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy import ops as module
from tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops
from tensorflow_probability.python.internal.backend.numpy import debugging as check_ops
from tensorflow_probability.python.internal.backend.numpy import control_flow as control_flow_ops
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg_ops
from tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops
from tensorflow_probability.python.internal.backend.numpy import ops as variables_module
from tensorflow_probability.python.internal.backend.numpy import nest
################################################################################
# To make more friendly for TF2.
################################################################################
def convert_nonref_to_tensor(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor` if input is nonreference type.
This function converts Python objects of various types to `Tensor` objects
except if the input has reference semantics. Reference semantics are
characterized by `is_ref`: a reference-type object is a `tf.Variable` or an
instance of `tf.Module`. This function accepts any input
which `tf.convert_to_tensor` would also accept.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so dtype_hint
can be used as a soft preference. If the conversion to
`dtype_hint` is not possible, this argument has no effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
tensor: A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
#### Examples:
```python
x = tf.Variable(0.)
y = convert_nonref_to_tensor(x)
x is y
# ==> True
x = tf.constant(0.)
y = convert_nonref_to_tensor(x)
x is y
# ==> True
x = np.array(0.)
y = convert_nonref_to_tensor(x)
x is y
# ==> False
tf.is_tensor(y)
# ==> True
x = tfp.util.DeferredTensor(13.37, lambda x: x)
y = convert_nonref_to_tensor(x)
x is y
# ==> True
tf.is_tensor(y)
# ==> False
tf.equal(y, 13.37)
# ==> True
```
"""
# We explicitly do not use a tf.name_scope to avoid graph clutter.
if value is None:
return None
if is_ref(value):
if dtype is None:
return value
dtype_base = base_dtype(dtype)
value_dtype_base = base_dtype(value.dtype)
if dtype_base != value_dtype_base:
raise TypeError('Mutable type must be of dtype "{}" but is "{}".'.format(
dtype_name(dtype_base), dtype_name(value_dtype_base)))
return value
return ops.convert_to_tensor(
value, dtype=dtype, dtype_hint=dtype_hint, name=name)
def base_dtype(dtype):
"""Returns a non-reference `dtype` based on this `dtype`."""
dtype = dtypes.as_dtype(dtype)
if hasattr(dtype, "base_dtype"):
return dtype
return dtype
def dtype_name(dtype):
"""Returns the string name for this `dtype`."""
dtype = dtypes.as_dtype(dtype)
if hasattr(dtype, "name"):
return dtype.name
if hasattr(dtype, "__name__"):
return dtype.__name__
return str(dtype)
def check_dtype(arg, dtype):
"""Check that arg.dtype == self.dtype."""
if arg.dtype != dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s" %
(dtype, arg.dtype, arg))
def is_ref(x):
"""Evaluates if the object has reference semantics.
An object is deemed "reference" if it is a `tf.Variable` instance or is
derived from a `tf.Module` with `dtype` and `shape` properties.
Args:
x: Any object.
Returns:
is_ref: Python `bool` indicating whether the input has reference semantics,
i.e., is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties.
"""
return (
# Note: we check that tf.Variable is a class because we might be using a
# different backend other than TF.
isinstance(x, variables_module.Variable) or
(isinstance(x, module.Module) and hasattr(x, "dtype") and
hasattr(x, "shape")))
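# A minimal sketch (not part of the original module): in this numpy backend,
# plain arrays and Python scalars are value-like, so is_ref() returns False
# for them; only Variable/Module instances from the backend shims count as
# reference-like.  Only the numpy cases are asserted, and the helper is never
# called.
def _example_is_ref():
  assert not is_ref(np.array(1.))
  assert not is_ref(3.14)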
def assert_not_ref_type(x, arg_name):
if is_ref(x):
raise TypeError(
"Argument %s cannot be reference type. Found: %s" % (arg_name, type(x)))
################################################################################
# Asserts.
################################################################################
def assert_no_entries_with_modulus_zero(
x, message=None, name="assert_no_entries_with_modulus_zero"):
"""Returns `Op` that asserts Tensor `x` has no entries with modulus zero.
Args:
x: Numeric `Tensor`, real, integer, or complex.
message: A string message to prepend to failure message.
name: A name to give this `Op`.
Returns:
An `Op` that asserts `x` has no entries with modulus zero.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
should_be_nonzero = math_ops.abs(x)
zero = ops.convert_to_tensor(0, dtype=dtypes.real_dtype(dtype))
return check_ops.assert_less(zero, should_be_nonzero, message=message)
def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"):
"""Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts.
Args:
x: Numeric `Tensor`, real, integer, or complex.
message: A string message to prepend to failure message.
name: A name to give this `Op`.
Returns:
An `Op` that asserts `x` has no entries with a non-zero imaginary part.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return control_flow_ops.no_op()
zero = ops.convert_to_tensor(0, dtype=dtypes.real_dtype(dtype))
return check_ops.assert_equal(zero, math_ops.imag(x), message=message)
def assert_compatible_matrix_dimensions(operator, x):
"""Assert that an argument to solve/matmul has proper domain dimension.
If `tensor_shape.TensorShape(operator.shape)[-2:] = [M, N]`, and `tensor_shape.TensorShape(x.shape)[-2:] = [Q, R]`, then
`operator.matmul(x)` is defined only if `N = Q`. This `Op` returns an
`Assert` that "fires" if this is not the case. Static checks are already
done by the base class `LinearOperator`.
Args:
operator: `LinearOperator`.
x: `Tensor`.
Returns:
`Assert` `Op`.
"""
# Static checks are done in the base class. Only tensor asserts here.
assert_same_dd = check_ops.assert_equal(
array_ops.shape(x)[-2],
operator.domain_dimension_tensor(),
# This error message is made to look similar to the error raised by the
# static check in the base class.
message=("Dimensions are not compatible. "
"shape[-2] of argument to be the same as this operator"))
return assert_same_dd
def assert_is_batch_matrix(tensor):
"""Static assert that `tensor` has rank `2` or higher."""
sh = tensor_shape.TensorShape(tensor.shape)
if sh.ndims is not None and sh.ndims < 2:
raise ValueError(
"Expected [batch] matrix to have at least two dimensions. Found: "
"%s" % tensor)
def shape_tensor(shape, name=None):
"""Convert Tensor using default type, unless empty list or tuple."""
# Works just like random_ops._ShapeTensor.
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name=name)
################################################################################
# Broadcasting versions of common linear algebra functions.
# TODO(b/77519145) Do this more efficiently in some special cases.
################################################################################
def broadcast_matrix_batch_dims(batch_matrices, name=None):
"""Broadcast leading dimensions of zero or more [batch] matrices.
Example broadcasting one batch dim of two simple matrices.
```python
x = [[1, 2],
[3, 4]] # Shape [2, 2], no batch dims
y = [[[1]]] # Shape [1, 1, 1], 1 batch dim of shape [1]
x_bc, y_bc = broadcast_matrix_batch_dims([x, y])
x_bc
==> [[[1, 2],
[3, 4]]] # Shape [1, 2, 2], 1 batch dim of shape [1].
y_bc
==> same as y
```
Example broadcasting many batch dims
```python
x = tf.random.normal(shape=(2, 3, 1, 4, 4))
y = tf.random.normal(shape=(1, 3, 2, 5, 5))
x_bc, y_bc = broadcast_matrix_batch_dims([x, y])
tensor_shape.TensorShape(x_bc.shape)
==> (2, 3, 2, 4, 4)
tensor_shape.TensorShape(y_bc.shape)
==> (2, 3, 2, 5, 5)
```
Args:
batch_matrices: Iterable of `Tensor`s, each having two or more dimensions.
name: A string name to prepend to created ops.
Returns:
bcast_matrices: List of `Tensor`s, with `bcast_matrices[i]` containing
the values from `batch_matrices[i]`, with possibly broadcast batch dims.
Raises:
ValueError: If any input `Tensor` is statically determined to have less
than two dimensions.
"""
with ops.name_scope(
name or "broadcast_matrix_batch_dims", values=batch_matrices):
check_ops.assert_proper_iterable(batch_matrices)
batch_matrices = list(batch_matrices)
for i, mat in enumerate(batch_matrices):
batch_matrices[i] = ops.convert_to_tensor(mat)
assert_is_batch_matrix(batch_matrices[i])
if len(batch_matrices) < 2:
return batch_matrices
# Try static broadcasting.
# bcast_batch_shape is the broadcast batch shape of ALL matrices.
# E.g. if batch_matrices = [x, y], with
# tensor_shape.TensorShape(x.shape) = [2, j, k] (batch shape = [2])
# tensor_shape.TensorShape(y.shape) = [3, 1, l, m] (batch shape = [3, 1])
# ==> bcast_batch_shape = [3, 2]
bcast_batch_shape = tensor_shape.TensorShape(batch_matrices[0].shape)[:-2]
for mat in batch_matrices[1:]:
bcast_batch_shape = _ops.broadcast_static_shape(
bcast_batch_shape,
tensor_shape.TensorShape(mat.shape)[:-2])
if bcast_batch_shape.is_fully_defined():
for i, mat in enumerate(batch_matrices):
if tensor_shape.TensorShape(mat.shape)[:-2] != bcast_batch_shape:
bcast_shape = array_ops.concat(
[bcast_batch_shape.as_list(), array_ops.shape(mat)[-2:]], axis=0)
batch_matrices[i] = _ops.broadcast_to(mat, bcast_shape)
return batch_matrices
# Since static didn't work, do dynamic, which always copies data.
bcast_batch_shape = array_ops.shape(batch_matrices[0])[:-2]
for mat in batch_matrices[1:]:
bcast_batch_shape = array_ops.broadcast_dynamic_shape(
bcast_batch_shape,
array_ops.shape(mat)[:-2])
for i, mat in enumerate(batch_matrices):
batch_matrices[i] = _ops.broadcast_to(
mat,
array_ops.concat(
[bcast_batch_shape, array_ops.shape(mat)[-2:]], axis=0))
return batch_matrices
def matrix_solve_with_broadcast(matrix, rhs, adjoint=False, name=None):
"""Solve systems of linear equations."""
with ops.name_scope(name, "MatrixSolveWithBroadcast", [matrix, rhs]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
rhs = ops.convert_to_tensor(rhs, name="rhs", dtype=matrix.dtype)
# If either matrix/rhs has extra dims, we can reshape to get rid of them.
matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency(
matrix, rhs, adjoint_a=adjoint)
# This will broadcast by brute force if we still need to.
matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs])
solution = _linalg.solve(
matrix, rhs, adjoint=adjoint and still_need_to_transpose)
return reshape_inv(solution)
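# A small numpy-only sketch (not part of the original module) of the
# broadcasting behaviour provided above: a batch of matrices of shape
# [2, 3, 3] is solved against one shared right-hand side of shape [3, 1] by
# first tiling the rhs across the batch dimension.  Plain numpy is used here
# instead of this module's ops/array_ops shims, and the helper is never
# called.
def _example_batched_solve():
  rng = np.random.RandomState(0)
  matrix = rng.randn(2, 3, 3) + 3. * np.eye(3)  # two well-conditioned 3x3 systems
  rhs = rng.randn(3, 1)                         # one shared right-hand side
  rhs_batched = np.broadcast_to(rhs, (2, 3, 1))
  solution = np.linalg.solve(matrix, rhs_batched)
  assert solution.shape == (2, 3, 1)
  assert np.allclose(np.matmul(matrix, solution), rhs_batched)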
def _reshape_for_efficiency(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False):
"""Maybe reshape a, b, and return an inverse map. For matmul/solve."""
def identity(x):
return x
# At this point, we have not taken transpose/adjoint of a/b.
still_need_to_transpose = True
if tensor_shape.TensorShape(a.shape).ndims is None or tensor_shape.TensorShape(b.shape).ndims is None:
return a, b, identity, still_need_to_transpose
# This could be handled in the future, but seems less common.
if tensor_shape.TensorShape(a.shape).ndims >= tensor_shape.TensorShape(b.shape).ndims:
return a, b, identity, still_need_to_transpose
# From now on, we might modify b, but will not modify a.
# Suppose:
# tensor_shape.TensorShape(a.shape) = C + [m, n]
# tensor_shape.TensorShape(b.shape) = S + C + [n, r]
b_extra_ndims = tensor_shape.TensorShape(b.shape).ndims - tensor_shape.TensorShape(a.shape).ndims
# b_extra_sh = S, b_main_sh = C + [n, r]
b_extra_sh = array_ops.shape(b)[:b_extra_ndims]
b_main_sh = array_ops.shape(b)[b_extra_ndims:]
# No reason to flip unless the extra dims of b are big enough. Why?
# Assume adjoint/transpose = False. Then...
# By not flipping, we have to replicate a to shape
# b_extra_sh + tensor_shape.TensorShape(a.shape),
# which could use extra memory. But in all cases, the final output has shape
# b_extra_sh + tensor_shape.TensorShape(a.shape)[:-1] + [tensor_shape.TensorShape(b.shape)[-1]]
# So we only end up creating a larger object if the end dim of b is smaller
# than the end dim of a. This often happens, e.g. if b was a vector that was
# expanded to a matrix (by appending a singleton).
# Since adjoint/transpose may not be False, we must make adjustments here.
# The dim of b that holds the multiple equations.
a_domain_sz_ = tensor_shape.TensorShape(a.shape)[-2 if adjoint_a or transpose_a else -1]
b_eq_sz_ = tensor_shape.TensorShape(b.shape)[-2 if adjoint_b or transpose_b else -1]
b_extra_sz_ = (
np.prod(tensor_shape.TensorShape(b.shape)[:b_extra_ndims].as_list())
if tensor_shape.TensorShape(b.shape)[:b_extra_ndims].is_fully_defined() else None)
if (a_domain_sz_ is not None and b_eq_sz_ is not None and
b_extra_sz_ is not None):
if b_extra_sz_ < 2 or a_domain_sz_ <= b_eq_sz_:
return a, b, identity, still_need_to_transpose
# At this point, we're flipping for sure!
# Any transposes/adjoints will happen here explicitly, rather than in calling
# code. Why? To avoid having to write separate complex code for each case.
if adjoint_a:
a = _linalg.matrix_transpose(a, conjugate=True)
elif transpose_a:
a = _linalg.matrix_transpose(a, conjugate=False)
if adjoint_b:
b = _linalg.matrix_transpose(b, conjugate=True)
elif transpose_b:
b = _linalg.matrix_transpose(b, conjugate=False)
still_need_to_transpose = False
# Recompute shapes, since the transpose/adjoint may have changed them.
b_extra_sh = array_ops.shape(b)[:b_extra_ndims]
b_main_sh = array_ops.shape(b)[b_extra_ndims:]
# Permutation to put the extra dims at the end.
perm = (
np.concatenate(
(np.arange(b_extra_ndims, tensor_shape.TensorShape(b.shape).ndims),
np.arange(0, b_extra_ndims)), 0))
b_extra_on_end = array_ops.transpose(b, perm=perm)
# Now squash this end into one long dim.
b_squashed_end = array_ops.reshape(
b_extra_on_end, array_ops.concat((b_main_sh[:-1], [-1]), 0))
def reshape_inv(y):
# Expand the extra dims hanging off the end, "b_extra_sh".
# Note we use y_sh[:-1] + [b_main_sh[-1]] rather than b_main_sh, because y
# Could have different batch dims than a and b, because of broadcasting.
y_extra_shape = array_ops.concat(
(array_ops.shape(y)[:-1], [b_main_sh[-1]], b_extra_sh), 0)
y_extra_on_end = array_ops.reshape(y, y_extra_shape)
inverse_perm = np.argsort(perm)
return array_ops.transpose(y_extra_on_end, perm=inverse_perm)
return a, b_squashed_end, reshape_inv, still_need_to_transpose
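# A worked shape example (not part of the original module) for the reshaping
# above, in plain numpy.  With a.shape = C + [n, n] = (4, 3, 3) and
# b.shape = S + C + [n, r] = (5, 4, 3, 1), the extra leading dim S of b is
# rotated to the end and folded into the trailing dim, so a solve/matmul runs
# once against a (4, 3, 5) right-hand side instead of replicating `a` to
# shape (5, 4, 3, 3).  The helper is illustrative only and never called.
def _example_reshape_for_efficiency_shapes():
  b = np.zeros((5, 4, 3, 1))
  b_extra_ndims = 1
  perm = np.concatenate(
      (np.arange(b_extra_ndims, b.ndim), np.arange(0, b_extra_ndims)), 0)
  b_extra_on_end = np.transpose(b, axes=perm)
  assert b_extra_on_end.shape == (4, 3, 1, 5)
  b_squashed_end = b_extra_on_end.reshape((4, 3, -1))
  assert b_squashed_end.shape == (4, 3, 5)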
################################################################################
# Helpers for hints.
################################################################################
def use_operator_or_provided_hint_unless_contradicting(
operator, hint_attr_name, provided_hint_value, message):
"""Get combined hint in the case where operator.hint should equal hint.
Args:
operator: LinearOperator that a meta-operator was initialized with.
hint_attr_name: String name for the attribute.
provided_hint_value: Bool or None. Value passed by user in initialization.
message: Error message to print if hints contradict.
Returns:
True, False, or None.
Raises:
ValueError: If hints contradict.
"""
op_hint = getattr(operator, hint_attr_name)
# pylint: disable=g-bool-id-comparison
if op_hint is False and provided_hint_value:
raise ValueError(message)
if op_hint and provided_hint_value is False:
raise ValueError(message)
if op_hint or provided_hint_value:
return True
if op_hint is False or provided_hint_value is False:
return False
# pylint: disable=g-bool-id-comparison
return None
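# A small sketch (not part of the original module) of the combination rule
# above.  The `is_self_adjoint` attribute name is only an illustrative choice
# of hint, and the fake operator stands in for a real LinearOperator; the
# helper is never called.
def _example_combine_hints():
  class _FakeOperator(object):
    is_self_adjoint = None  # the operator does not know this hint
  combined = use_operator_or_provided_hint_unless_contradicting(
      _FakeOperator(), "is_self_adjoint", True,
      message="is_self_adjoint hints contradict")
  assert combined is True  # the user-provided hint fills in the unknown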
################################################################################
# Utilities for blockwise operators.
################################################################################
def arg_is_blockwise(block_dimensions, arg, arg_split_dim):
"""Detect if input should be interpreted as a list of blocks."""
# Tuples and lists of length equal to the number of operators may be
# blockwise.
if (isinstance(arg, (tuple, list)) and len(arg) == len(block_dimensions)):
# If the elements of the iterable are not nested, interpret the input as
# blockwise.
if not any(nest.is_nested(x) for x in arg):
return True
else:
arg_dims = [ops.convert_to_tensor(x).shape[arg_split_dim] for x in arg]
self_dims = [dim.value for dim in block_dimensions]
# If none of the operator dimensions are known, interpret the input as
# blockwise if its matching dimensions are unequal.
if all(self_d is None for self_d in self_dims):
# A nested tuple/list with a single outermost element is not blockwise
if len(arg_dims) == 1:
return False
elif any(dim != arg_dims[0] for dim in arg_dims):
return True
else:
raise ValueError(
"Parsing of the input structure is ambiguous. Please input "
"a blockwise iterable of `Tensor`s or a single `Tensor`.")
# If input dimensions equal the respective (known) blockwise operator
# dimensions, then the input is blockwise.
if all(self_d == arg_d or self_d is None
for self_d, arg_d in zip(self_dims, arg_dims)):
return True
# If the input dimensions are all equal and are greater than or equal to
# the sum of the known operator dimensions, interpret the input as a
# single Tensor; i.e., the input is not blockwise.
self_dim = sum(self_d for self_d in self_dims if self_d is not None)
if all(s == arg_dims[0] for s in arg_dims) and arg_dims[0] >= self_dim:
return False
# If none of these conditions is met, the input shape is mismatched.
raise ValueError("Input dimension does not match operator dimension.")
else:
return False
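# A minimal sketch (not part of the original module) of the two unambiguous
# cases above.  The `_Dim` helper is a stand-in for Dimension-like objects
# exposing `.value`, plain numpy arrays are used as the inputs, and the
# helper is never called.
def _example_arg_is_blockwise():
  class _Dim(object):
    def __init__(self, value):
      self.value = value
  block_dims = [_Dim(2), _Dim(3)]
  # A flat list with one entry per block is treated as blockwise input.
  assert arg_is_blockwise(block_dims, [np.zeros([2, 1]), np.zeros([3, 1])], -2)
  # A single array is not.
  assert not arg_is_blockwise(block_dims, np.zeros([5, 1]), -2)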
def split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1):
"""Split `x` into blocks matching `operators`'s `domain_dimension`.
Specifically, if we have a blockwise lower-triangular matrix, with block
sizes along the diagonal `[M_j, M_j] j = 0,1,2..J`, this method splits `arg`
on `axis` into `J` tensors, whose shape at `axis` is `M_j`.
Args:
block_dims: Iterable of `TensorShapes`.
block_dims_fn: Callable returning an iterable of `Tensor`s.
arg: `Tensor`. `arg` is split into `J` tensors.
axis: Python `Integer` representing the axis to split `arg` on.
Returns:
A list of `Tensor`s.
"""
block_sizes = [dim.value for dim in block_dims]
if any(d is None for d in block_sizes):
block_sizes = block_dims_fn()
return array_ops.split(arg, block_sizes, axis=axis)
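# A numpy-only sketch (not part of the original module) of the splitting
# above.  Note that array_ops.split (like tf.split) takes block *sizes*,
# whereas np.split takes split *indices*, so the sizes are converted with a
# cumulative sum here; the helper is never called.
def _example_split_into_blocks():
  arg = np.arange(12.).reshape(2, 6)          # trailing axis holds 6 = 2 + 4
  block_sizes = [2, 4]
  split_points = np.cumsum(block_sizes)[:-1]  # [2]
  blocks = np.split(arg, split_points, axis=-1)
  assert [b.shape for b in blocks] == [(2, 2), (2, 4)]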
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
from tensorflow_probability.python.internal.backend.numpy import private
distribution_util = private.LazyLoader(
"distribution_util", globals(),
"tensorflow_probability.python.internal._numpy.distribution_util")
tensorshape_util = private.LazyLoader(
"tensorshape_util", globals(),
"tensorflow_probability.python.internal._numpy.tensorshape_util")
| 37.726979
| 122
| 0.674389
|
742df2ad8cd3b9f7c3100a0e1ad98777af373bf7
| 57,590
|
py
|
Python
|
ovpr_atp/awards/migrations/0054_auto__add_field_awardacceptance_pta_modification.py
|
pawanacharya1979/Awdportal
|
f0ed6ad723d70fae4737e517d4dca07b2aef176a
|
[
"MIT"
] | null | null | null |
ovpr_atp/awards/migrations/0054_auto__add_field_awardacceptance_pta_modification.py
|
pawanacharya1979/Awdportal
|
f0ed6ad723d70fae4737e517d4dca07b2aef176a
|
[
"MIT"
] | null | null | null |
ovpr_atp/awards/migrations/0054_auto__add_field_awardacceptance_pta_modification.py
|
pawanacharya1979/Awdportal
|
f0ed6ad723d70fae4737e517d4dca07b2aef176a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AwardAcceptance.pta_modification'
db.add_column(u'awards_awardacceptance', 'pta_modification',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AwardAcceptance.pta_modification'
db.delete_column(u'awards_awardacceptance', 'pta_modification')
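# A hedged sketch (not part of this South migration) of the same schema
# change written as a modern Django migration operation, for readers who have
# not used South's add_column/delete_column API; kept in a comment so it is
# not executed:
#
#     migrations.AddField(
#         model_name='awardacceptance',
#         name='pta_modification',
#         field=models.BooleanField(default=True),
#     )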
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'awards.allowedcostschedule': {
'Meta': {'ordering': "['name']", 'object_name': 'AllowedCostSchedule'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.award': {
'Meta': {'object_name': 'Award'},
'award_acceptance_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_closeout_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_management_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'award_management_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_modification_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"}),
'award_negotiation_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"}),
'award_setup_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extracted_to_eas': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subaward_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subaward_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'awards.awardacceptance': {
'Meta': {'object_name': 'AwardAcceptance'},
'admin_establishment': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'agency_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_acceptance_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_direct_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_issue_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_total_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contracting_official': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'current_modification': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'eas_status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'explanation': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'f_a_rate': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'fcoi_cleared_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'foreign_travel': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'full_f_a_recovery': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gmo_co_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'gmo_co_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mfa_investigators': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'phs_funded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project_title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'pta_modification': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sponsor_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'awards.awardcloseout': {
'Meta': {'object_name': 'AwardCloseout'},
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_assigned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'awards.awardmanagement': {
'Meta': {'object_name': 'AwardManagement'},
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_assigned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'awards.awardmanager': {
'Meta': {'object_name': 'AwardManager'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'gwid': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'system_user': ('django.db.models.fields.BooleanField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'awards.awardnegotiation': {
'Meta': {'object_name': 'AwardNegotiation'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'certificates_insurance': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'current_modification': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'data_security_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'date_assigned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'everify': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'foreign_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'government_property': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gw_background_ip': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gw_doesnt_own_ip': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'insurance_renewal': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'negotiation_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'negotiation_status': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'negotiator': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'other_award_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'publication_restriction': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'related_other_agreements': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'related_other_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'retention_period': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subcontracting_plan': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'under_master_agreement': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.awardorganization': {
'Meta': {'ordering': "['name']", 'object_name': 'AwardOrganization'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'org_info1_meaning': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'org_info2_meaning': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'organization_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
u'awards.awardsetup': {
'Meta': {'object_name': 'AwardSetup'},
'allowed_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AllowedCostSchedule']", 'null': 'True', 'blank': 'True'}),
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'award_setup_complete': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardTemplate']", 'null': 'True', 'blank': 'True'}),
'bill_to_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_events': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'budget_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'cfda_number': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.CFDANumber']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'cost_sharing_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'date_assigned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'document_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'eas_award_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'equipment_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expanded_authority': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'federal_negotiated_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FedNegRate']", 'null': 'True', 'blank': 'True'}),
'final_reports_due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'financial_reporting_oth': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'financial_reporting_req': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '14', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indirect_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.IndirectCost']", 'null': 'True', 'blank': 'True'}),
'invention_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nine_ninety_form_needed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'onr_administered_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'patent_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'performance_site': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'pre_award_spending_auth': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'property_equip_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'property_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'qa_screening_complete': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ready_for_eas_setup': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'record_destroy_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'sp_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'task_location': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'technical_reporting_oth': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'technical_reporting_req': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '14', 'blank': 'True'}),
'wait_for': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'awards.awardtemplate': {
'Meta': {'ordering': "['number']", 'object_name': 'AwardTemplate'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.cfdanumber': {
'Meta': {'ordering': "['flex_value']", 'object_name': 'CFDANumber'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'flex_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'primary_key': 'True'})
},
u'awards.easmapping': {
'Meta': {'unique_together': "(('interface', 'field', 'incoming_value', 'atp_model', 'atp_pk'),)", 'object_name': 'EASMapping'},
'atp_model': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'atp_pk': ('django.db.models.fields.IntegerField', [], {}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incoming_value': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'interface': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'})
},
u'awards.fednegrate': {
'Meta': {'ordering': "['description']", 'object_name': 'FedNegRate'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'flex_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'primary_key': 'True'})
},
u'awards.finalreport': {
'Meta': {'object_name': 'FinalReport'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'submitted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.fundingsource': {
'Meta': {'ordering': "['number']", 'object_name': 'FundingSource'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'awards.indirectcost': {
'Meta': {'ordering': "['rate_schedule']", 'object_name': 'IndirectCost'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'rate_schedule': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.keypersonnel': {
'Meta': {'object_name': 'KeyPersonnel'},
'academic_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'calendar_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'effort': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'project_role': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Proposal']"}),
'summer_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'})
},
u'awards.performancesite': {
'Meta': {'object_name': 'PerformanceSite'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Proposal']"}),
'ps_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_country': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'ps_duns': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ps_organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'ps_street1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_street2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_zipcode': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
u'awards.primesponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'PrimeSponsor'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.IntegerField', [], {})
},
u'awards.priorapproval': {
'Meta': {'object_name': 'PriorApproval'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'})
},
u'awards.proposal': {
'Meta': {'object_name': 'Proposal', 'index_together': "[['award', 'is_first_proposal']]"},
'agency_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FundingSource']", 'null': 'True', 'blank': 'True'}),
'agency_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'application_type_code': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'are_human_subjects_used': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'are_vertebrate_animals_used': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'budget_first_per_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'budget_first_per_start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cost_shr_mand_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'cost_shr_mand_is_committed': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'cost_shr_mand_source': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'cost_shr_vol_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'cost_shr_vol_is_committed': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'cost_shr_vol_source': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'department_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardOrganization']", 'null': 'True', 'blank': 'True'}),
'departmental_id_primary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_id_secondary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_name_primary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_name_secondary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'division_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'dummy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'federal_identifier': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'iacuc_approval_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'iacuc_protocol_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irb_protocol_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'irb_review_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'is_change_in_grantee_inst': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_first_proposal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_haz_mat': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'is_iacuc_review_pending': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'is_irb_review_pending': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'is_subcontract': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'lotus_agency_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'lotus_department_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'lotus_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'project_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'project_start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'project_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'project_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'proposal_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'proposal_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'proposal_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'proposal_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'responsible_entity': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'sponsor_deadline': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'submission_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'tracking_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'who_is_prime': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.PrimeSponsor']", 'null': 'True', 'blank': 'True'}),
'will_involve_foreign_contract': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'will_involve_foreign_nationals': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'will_involve_shipment': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
u'awards.proposalintake': {
'Meta': {'object_name': 'ProposalIntake'},
'agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'announcement_link': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fcoi_submitted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'five_days_granted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'five_days_requested': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jit_request': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'jit_response_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'phs_funded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'prime_sponsor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'program_announcement': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'proposal_due_to_aor': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_due_to_ovpr': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_due_to_sponsor': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'proposal_outcome': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'proposal_status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'spa1': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'awards.ptanumber': {
'Meta': {'object_name': 'PTANumber'},
'agency_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'agency_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FundingSource']", 'null': 'True', 'blank': 'True'}),
'allowed_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AllowedCostSchedule']", 'null': 'True', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'award_setup_complete': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardTemplate']", 'null': 'True', 'blank': 'True'}),
'banner_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'cfda_number': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.CFDANumber']", 'null': 'True', 'blank': 'True'}),
'cs_banner_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'department_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardOrganization']", 'null': 'True', 'blank': 'True'}),
'eas_award_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'eas_status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'federal_negotiated_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FedNegRate']", 'null': 'True', 'blank': 'True'}),
'final_reports_due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indirect_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.IndirectCost']", 'null': 'True', 'blank': 'True'}),
'parent_banner_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'preaward_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'project_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'project_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'ready_for_eas_setup': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'sp_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'sponsor_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sponsor_banner_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'task_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'total_pta_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'who_is_prime': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.PrimeSponsor']", 'null': 'True', 'blank': 'True'})
},
u'awards.reportsubmission': {
'Meta': {'object_name': 'ReportSubmission'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'submitted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.subaward': {
'Meta': {'object_name': 'Subaward'},
'agreement_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'approval_expiration': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'assist': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'cfda_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'citi_cleared': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_fully_executed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'debarment_check': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'duns_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ein': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fain': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fcoi_cleared': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ffata_reportable': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ffata_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'funding_mechanism': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'gw_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modification_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'other_mechanism': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'reminder': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'risk': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'sent': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'subaward_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subaward_ready': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subaward_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subrecipient_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'tech_report_due': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'tech_report_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['awards']
| 102.47331
| 195
| 0.574787
|
71610a6f2ac030f04efc9eb38d9e9fb36116fca8
| 4,781
|
py
|
Python
|
skbio/stats/distance/tests/test_permanova.py
|
shiffer1/scikit-bio
|
a9b6eddad4e53c96f6132f27bc1c31e6b7d8ea1b
|
[
"BSD-3-Clause"
] | null | null | null |
skbio/stats/distance/tests/test_permanova.py
|
shiffer1/scikit-bio
|
a9b6eddad4e53c96f6132f27bc1c31e6b7d8ea1b
|
[
"BSD-3-Clause"
] | null | null | null |
skbio/stats/distance/tests/test_permanova.py
|
shiffer1/scikit-bio
|
a9b6eddad4e53c96f6132f27bc1c31e6b7d8ea1b
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from functools import partial
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import permanova
class TestPERMANOVA(TestCase):
"""All results were verified with R (vegan::adonis)."""
def setUp(self):
# Distance matrices with and without ties in the ranks, with 2 groups
# of equal size.
dm_ids = ['s1', 's2', 's3', 's4']
self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
self.df = pd.read_csv(
StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
's1,Control'), index_col=0)
self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
[1, 0, 3, 2],
[1, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
[1, 0, 3, 2],
[5, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
# Test with 3 groups of unequal size.
self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
'Treatment1', 'Control', 'Control']
# Equivalent grouping but with different labels -- groups should be
# assigned different integer labels but results should be the same.
self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
self.dm_unequal = DistanceMatrix(
[[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
[1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
[0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
[0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
[1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
[1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
['s1', 's2', 's3', 's4', 's5', 's6'])
# Expected series index is the same across all tests.
self.exp_index = ['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations']
# Stricter series equality testing than the default.
self.assert_series_equal = partial(assert_series_equal,
check_index_type=True,
check_series_type=True)
def test_call_ties(self):
# Ensure we get the same results if we rerun the method using the same
# inputs. Also ensure we get the same results if we run the method
# using a grouping vector or a data frame with equivalent groupings.
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999])
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.df, column='Group')
self.assert_series_equal(obs, exp)
def test_call_no_ties(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999])
np.random.seed(0)
obs = permanova(self.dm_no_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
def test_call_no_permutations(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0])
obs = permanova(self.dm_no_ties, self.grouping_equal, permutations=0)
self.assert_series_equal(obs, exp)
def test_call_unequal_group_sizes(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645,
999])
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal)
self.assert_series_equal(obs, exp)
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal_relabeled)
self.assert_series_equal(obs, exp)
if __name__ == '__main__':
main()
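
# --- Illustrative usage sketch (not part of the test suite) -------------------
# A minimal example of the permanova() call pattern exercised by the tests above;
# the distance values and group labels are made up, and the helper name is ours.
def _permanova_usage_sketch():
    dm = DistanceMatrix([[0, 1, 2, 4],
                         [1, 0, 3, 2],
                         [2, 3, 0, 3],
                         [4, 2, 3, 0]], ['s1', 's2', 's3', 's4'])
    grouping = ['Control', 'Control', 'Fast', 'Fast']
    np.random.seed(0)  # the permutation test is stochastic; seed for repeatability
    result = permanova(dm, grouping, permutations=99)
    # result is a pandas Series keyed by the same index the tests check above
    return result['test statistic'], result['p-value']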
| 40.516949
| 78
| 0.533989
|
243fcc472d5f541f75d5d0cb5c1f8854d67105f3
| 6,982
|
py
|
Python
|
forge/blade/core/market/new_visualizer.py
|
narendasan/neural-mmo
|
36a588db0021cccd7275cebef2cbdc5ee8eb40d5
|
[
"MIT"
] | 4
|
2020-11-08T22:33:15.000Z
|
2020-11-21T15:45:43.000Z
|
forge/blade/core/market/new_visualizer.py
|
ThomasCloarec/neural-mmo
|
094744f49ad2cff179ec21e27285258903b70098
|
[
"MIT"
] | 1
|
2021-09-30T07:57:46.000Z
|
2021-10-02T00:39:11.000Z
|
forge/blade/core/market/new_visualizer.py
|
ThomasCloarec/neural-mmo
|
094744f49ad2cff179ec21e27285258903b70098
|
[
"MIT"
] | 2
|
2021-09-16T16:43:03.000Z
|
2021-09-28T18:12:41.000Z
|
from pdb import set_trace as T
import time
import ray
from functools import partial
from threading import Thread
from random import random
from bokeh.server.server import Server
from bokeh.models import ColumnDataSource
from bokeh.models import Band
from bokeh.plotting import curdoc, figure, show
from bokeh.themes import Theme
from tornado import gen
from tornado.ioloop import IOLoop
from forge.blade.core.market.config import *
# to run a demo with dummy data:
# $ bokeh serve --show visualizer.py
# market plans
# click on objects in market to display stats about them
# market overview tab
# -> trade bandwidth, demand, gdp
PORT=5009
class MarketVisualizer:
def __init__(self, keys, history_len: int = 512,
title: str = "NeuralMMO Market Data", x: str = "tick",
ylabel: str = "Dummy Values"):
"""Visualizes a stream of data with threaded refreshing
Args:
keys : List of object names (str) to be displayed on market
history_len : How far back to plot data
title : Title of graph
x : Name of x axis data
ylabel : Name of y axis on plot
"""
self.colors = 'blue red green yellow orange purple'.split()
self.data = {}
self.history_len = history_len
self.title = title
self.keys = keys
self.ylabel = ylabel
self.x = x
# TODO figure out a better way to ensure unique colors
assert len(self.keys) <= len(self.colors), 'Limited color pool'
for i, key in enumerate(self.keys):
self.data[key] = []
self.data[key+'lower'] = []
self.data[key+'upper'] = []
self.data['tick'] = []
def init(self, doc):
#Source must only be modified through a stream
self.source = ColumnDataSource(data=self.data)
#Enable theming
theme = Theme('forge/blade/core/market/theme.yaml')
doc.theme = theme
self.doc = doc
fig = figure(
plot_width=600,
plot_height=400,
tools='pan,xwheel_zoom,box_zoom,save,reset',
title='Neural MMO: Market Data',
x_axis_label=self.x,
y_axis_label=self.ylabel)
#Initialize plots
for i, key in enumerate(self.keys):
fig.line(
source=self.source,
x=self.x,
y=key,
color=self.colors[i],
line_width=LINE_WIDTH,
legend_label=key)
band = Band(
source=self.source,
base=self.x,
lower=key+'lower',
upper=key+'upper',
level='underlay',
line_color=self.colors[i],
line_width=1,
line_alpha=0.2,
fill_color=self.colors[i],
fill_alpha=0.2)
fig.add_layout(band)
#Set root
self.doc.add_root(fig)
self.fig = fig
@ray.remote
class BokehServer:
def __init__(self, market, *args, **kwargs):
""" Runs an asynchronous Bokeh data streaming server
Args:
market : The market to visualize
args : Additional arguments
kwargs : Additional keyword arguments
"""
self.visu = MarketVisualizer(*args, **kwargs)
self.market = market
server = Server(
{'/': self.init},
io_loop=IOLoop.current(),
port=PORT,
num_procs=1)
self.server = server
self.thread = None
self.tick = 0
server.start()
server.io_loop.add_callback(server.show, "/")
server.io_loop.start()
def init(self, doc):
'''Initialize document and threaded update loop
Args:
doc: A Bokeh document
'''
self.visu.init(doc)
self.doc = doc
self.thread = Thread(target=self.update, args=[])
self.thread.start()
self.started = True
def update(self):
'''Blocking update call to be run in a separate thread
Ingests packets from a remote market and streams to Bokeh client'''
self.n = 0
while True:
#Wait for thread to initialize
time.sleep(0.05)
if self.thread is None:
continue
#Get remote market data
packet = ray.get(self.market.getData.remote())
if packet is None:
continue
#Ingest market data
for key, val in packet.items():
if key[-3:] == 'std':
key = key[:-4]
dat = packet[key]
self.visu.data[key + 'lower'].append(dat - val)
self.visu.data[key + 'upper'].append(dat + val)
else:
self.visu.data[key].append(val)
#Stream to Bokeh client
self.doc.add_next_tick_callback(partial(self.stream))
self.tick += 1
@gen.coroutine
def stream(self):
'''Stream current data buffer to Bokeh client'''
self.visu.source.stream(self.visu.data, self.visu.history_len)
@ray.remote
class Middleman:
def __init__(self):
'''Remote data buffer for two processes to dump and recv data
This is probably not safe'''
self.data = None
def getData(self):
'''Get data from buffer
Returns:
data: From buffer
'''
data = self.data
self.data = None
return data
def setData(self, data):
'''Set buffer data
Args:
data: To set buffer
'''
self.data = data
class Market:
def __init__(self, items, middleman):
'''Dummy market emulator
Args:
items : List of item keys
middleman : A Middleman object'''
self.middleman = middleman
self.items = items
self.keys = items
self.data = {}
self.tick = 0
self.data['tick'] = 0
for i, key in enumerate(self.keys):
self.data[key] = 0
def update(self):
'''Updates market data and propagates to Bokeh server
Note: best to update all at once. Current version may cause bugs'''
for key, val in self.data.items():
self.data[key] = val + 0.2*(random() - 0.5)
if key == 'tick':
self.data[key] = self.tick
self.tick += 1
self.middleman.setData.remote(self.data)
# Example setup
if __name__ == '__main__':
ray.init()
ITEMS = ['Food', 'Water', 'Health', 'Melee', 'Range', 'Mage']
middleman = Middleman.remote()
market = Market(ITEMS, middleman)
visualizer = BokehServer.remote(middleman, ITEMS)
while True:
time.sleep(0.1)
market.update()
| 27.706349
| 78
| 0.545402
|
f6367b0d97cca9da857152bd98167d4adc8ce731
| 5,488
|
py
|
Python
|
lab/numpy/generic.py
|
patel-zeel/lab
|
cc0df2c03196863041e78fa4179445341e86958c
|
[
"MIT"
] | null | null | null |
lab/numpy/generic.py
|
patel-zeel/lab
|
cc0df2c03196863041e78fa4179445341e86958c
|
[
"MIT"
] | null | null | null |
lab/numpy/generic.py
|
patel-zeel/lab
|
cc0df2c03196863041e78fa4179445341e86958c
|
[
"MIT"
] | null | null | null |
from types import FunctionType
import numpy as np
import scipy.special as sps
from plum import Union
from . import B, dispatch, Numeric
from ..custom import bvn_cdf as _bvn_cdf
from ..types import NPNumeric, NPDType, Int, NPRandomState
__all__ = []
@dispatch
def isabstract(a: Numeric):
return False
@dispatch
def _jit_run(
f: FunctionType,
compilation_cache: dict,
jit_kw_args: dict,
*args: Union[Numeric, NPRandomState],
**kw_args,
):
# There is no JIT for NumPy, so just run the function.
return f(*args, **kw_args)
@dispatch
def isnan(a: Numeric):
return np.isnan(a)
@dispatch
def real(a: Numeric):
return np.real(a)
@dispatch
def imag(a: Numeric):
return np.imag(a)
@dispatch
def device(a: NPNumeric):
return "cpu"
@dispatch
def to_active_device(a: NPNumeric):
return a
@dispatch
def zeros(dtype: NPDType, *shape: Int):
return np.zeros(shape, dtype=dtype)
@dispatch
def ones(dtype: NPDType, *shape: Int):
return np.ones(shape, dtype=dtype)
@dispatch
def _eye2(dtype: NPDType, *shape: Int):
return np.eye(shape[0], shape[1], dtype=dtype)
@dispatch
def linspace(dtype: NPDType, a, b, num: Int):
return np.linspace(a, b, num, dtype=dtype)
@dispatch
def range(dtype: NPDType, start, stop, step):
return np.arange(start, stop, step, dtype=dtype)
@dispatch
def cast(dtype: NPDType, a: Numeric):
if B.dtype(a) == dtype:
return a
if hasattr(a, "astype"):
return a.astype(dtype, copy=False)
else:
return np.array(a, dtype=dtype)
@dispatch
def identity(a: Numeric):
return np.array(a)
@dispatch
def negative(a: Numeric):
return np.negative(a)
@dispatch
def abs(a: Numeric):
return np.abs(a)
@dispatch
def sign(a: Numeric):
return np.sign(a)
@dispatch
def sqrt(a: Numeric):
return np.sqrt(a)
@dispatch
def exp(a: Numeric):
return np.exp(a)
@dispatch
def log(a: Numeric):
return np.log(a)
@dispatch
def sin(a: Numeric):
return np.sin(a)
@dispatch
def arcsin(a: Numeric):
return np.arcsin(a)
@dispatch
def cos(a: Numeric):
return np.cos(a)
@dispatch
def arccos(a: Numeric):
return np.arccos(a)
@dispatch
def tan(a: Numeric):
return np.tan(a)
@dispatch
def arctan(a: Numeric):
return np.arctan(a)
@dispatch
def tanh(a: Numeric):
return np.tanh(a)
@dispatch
def arctanh(a: Numeric):
return np.arctanh(a)
@dispatch
def erf(a: Numeric):
return sps.erf(a)
@dispatch
def add(a: Numeric, b: Numeric):
return np.add(a, b)
@dispatch
def subtract(a: Numeric, b: Numeric):
return np.subtract(a, b)
@dispatch
def multiply(a: Numeric, b: Numeric):
return np.multiply(a, b)
@dispatch
def divide(a: Numeric, b: Numeric):
return np.divide(a, b)
@dispatch
def power(a: Numeric, b: Numeric):
return np.power(a, b)
@dispatch
def minimum(a: Numeric, b: Numeric):
return np.minimum(a, b)
@dispatch
def maximum(a: Numeric, b: Numeric):
return np.maximum(a, b)
@dispatch
def min(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.min(a, axis=axis, keepdims=not squeeze)
@dispatch
def argmin(a: Numeric, axis: Union[Int, None] = None):
return np.argmin(a, axis=axis)
@dispatch
def max(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.max(a, axis=axis, keepdims=not squeeze)
@dispatch
def argmax(a: Numeric, axis: Union[Int, None] = None):
return np.argmax(a, axis=axis)
@dispatch
def sum(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.sum(a, axis=axis, keepdims=not squeeze)
@dispatch
def prod(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.prod(a, axis=axis, keepdims=not squeeze)
@dispatch
def mean(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.mean(a, axis=axis, keepdims=not squeeze)
@dispatch
def std(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.std(a, axis=axis, ddof=0, keepdims=not squeeze)
@dispatch
def all(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.all(a, axis=axis, keepdims=not squeeze)
@dispatch
def any(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
return np.any(a, axis=axis, keepdims=not squeeze)
@dispatch
def lt(a: Numeric, b: Numeric):
return np.less(a, b)
@dispatch
def le(a: Numeric, b: Numeric):
return np.less_equal(a, b)
@dispatch
def gt(a: Numeric, b: Numeric):
return np.greater(a, b)
@dispatch
def ge(a: Numeric, b: Numeric):
return np.greater_equal(a, b)
@dispatch
def eq(a: Numeric, b: Numeric):
return np.equal(a, b)
@dispatch
def ne(a: Numeric, b: Numeric):
return np.not_equal(a, b)
@dispatch
def bvn_cdf(a: Numeric, b: Numeric, c: Numeric):
return _bvn_cdf(a, b, c)
@dispatch
def where(condition: Numeric, a: Numeric, b: Numeric):
return np.where(condition, a, b)
@dispatch
def sort(a: Numeric, axis: Int = -1, descending: bool = False):
if descending:
return -np.sort(-a, axis=axis)
else:
return np.sort(a, axis=axis)
@dispatch
def argsort(a: Numeric, axis: Int = -1, descending: bool = False):
if descending:
return np.argsort(-a, axis=axis)
else:
return np.argsort(a, axis=axis)
@dispatch
def quantile(a: Numeric, q: Numeric, axis: Union[Int, None] = None):
return np.quantile(a, q, axis=axis, interpolation="linear")
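
# --- Illustrative note (not part of the backend) -------------------------------
# The reductions above map their `squeeze` flag onto NumPy's `keepdims` argument
# (`keepdims=not squeeze`). A small self-contained check, with a made-up array:
def _squeeze_convention_sketch():
    x = np.arange(6, dtype=np.float64).reshape(2, 3)
    assert np.sum(x, axis=1, keepdims=False).shape == (2,)   # squeeze=True: axis dropped
    assert np.sum(x, axis=1, keepdims=True).shape == (2, 1)  # squeeze=False: axis kept as length 1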
| 17.422222
| 74
| 0.663812
|
1e14d6651d60705702f1524ab63f8a9985cfcb52
| 5,067
|
py
|
Python
|
webscan_backend/views.py
|
imfiver/Sec-Tools
|
a828e31c2e371c37f1256f0a574707a24776530d
|
[
"Apache-2.0"
] | 144
|
2021-11-05T10:45:05.000Z
|
2022-03-31T03:17:19.000Z
|
webscan_backend/views.py
|
imfiver/Sec-Tools
|
a828e31c2e371c37f1256f0a574707a24776530d
|
[
"Apache-2.0"
] | 6
|
2021-11-07T02:47:41.000Z
|
2022-03-06T05:50:15.000Z
|
webscan_backend/views.py
|
imfiver/Sec-Tools
|
a828e31c2e371c37f1256f0a574707a24776530d
|
[
"Apache-2.0"
] | 41
|
2021-11-07T13:35:02.000Z
|
2022-03-29T00:09:36.000Z
|
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
# Create your views here.
# -*- coding:utf-8 -*-
from django.views.decorators.csrf import csrf_exempt
from .plugins.common.common import success, error, addslashes, getdomain, getdomainip, check_ip, check_url
import time
from .plugins.common.common import getuserip
from .plugins.loginfo.loginfo import LogHandler
MYLOGGER = LogHandler(time.strftime("%Y-%m-%d", time.localtime()) + 'log')
@csrf_exempt  # marks this view as CSRF-exempt so it can be called from other origins
@login_required  # only users logged into the system may access this view
def port_scan(request):
"""
    Get the list of open ports for the given IP address.
"""
from .plugins.portscan.portscan import ScanPort
ip = request.POST.get('ip')
if check_ip(ip):
result = ScanPort(ip).pool()
MYLOGGER.info('M:' + request.method + ' P:' + request.path + ' UPOST:' + str(request.POST) + ' SC:200 UIP:' + getuserip(request) + ' RDATA:' + str(result))
return success(200, result, 'ok!')
    return error(400, 'Please enter a valid IP address', 'error')
@csrf_exempt
def info_leak(request):
"""
    Information leak detection for the target URL.
"""
from .plugins.infoleak.infoleak import get_infoleak
url = check_url(request.POST.get('url'))
if url:
result = get_infoleak(url)
MYLOGGER.info('M:' + request.method + ' P:' + request.path + ' UPOST:' + str(request.POST) + ' SC:200 UIP:' + getuserip(request) + ' RDATA:' + str(result))
return success(200, result, 'ok')
    return error(400, 'Please enter a valid URL', 'error')
@csrf_exempt
def getwebsideinfo(request):
"""
    Get information about other sites hosted on the same server (side sites).
"""
from .plugins.webside.webside import get_side_info
ip = request.POST.get('ip')
if check_ip(ip):
result = get_side_info(ip)
if result:
return success(200, result, 'ok')
        return error(400, 'No side-site information found!', 'error')
    return error(400, 'Please enter a valid IP address', 'error')
@csrf_exempt
def baseinfo(request):
"""
    Endpoint that returns the target site's basic information.
"""
from .plugins.baseinfo.baseinfo import getbaseinfo
url = check_url(request.POST.get('url'))
if url:
res = getbaseinfo(url)
MYLOGGER.info('M:' + request.method + ' P:' + request.path + ' UPOST:' + str(request.POST) + ' SC:200 UIP:' + getuserip(request) + ' RDATA:' + str(res))
return success(res['code'], res, res['msg'])
    return error(400, 'Please enter a valid URL', 'Please enter a valid URL, e.g. http://example.cn')
@csrf_exempt
def webweight(request):
"""
    Get the target site's search-engine weight (ranking score).
"""
from .plugins.webweight.webweight import get_web_weight
url = check_url(request.POST.get('url'))
if url:
result = get_web_weight(url)
MYLOGGER.info('M:' + request.method + ' P:' + request.path + ' UPOST:' + str(
request.POST) + ' SC:200 UIP:' + getuserip(request) + ' RDATA:' + str(result))
return success(200, result, 'ok')
    return error(400, 'Please enter a valid URL', 'error')
@csrf_exempt
def iplocating(request):
"""
    IP geolocation.
"""
from .plugins.iplocating.iplocating import get_locating
ip = request.POST.get('ip')
if check_ip(ip):
result = get_locating(ip)
return success(200, result, 'ok')
    return error(400, 'Please enter a valid IP address', 'error')
@csrf_exempt
def isexistcdn(request):
"""
    Determine whether the current domain is served through a CDN.
"""
from .plugins.cdnexist.cdnexist import iscdn
url = check_url(request.POST.get('url'))
if url:
result_str = iscdn(url)
        if result_str == '目标站点不可访问':  # sentinel string returned by iscdn() when the target site is unreachable
            return success(200, result_str, 'Network error')
        if result_str:
            result_str = 'CDN detected (the resolved origin IP may be inaccurate)'
        else:
            result_str = 'No CDN'
        return success(200, result_str, 'Success!')
    return error(400, 'Please enter a valid URL', 'error')
@csrf_exempt
def is_waf(request):
"""
    Determine whether the current domain is protected by a WAF.
"""
from .plugins.waf.waf import getwaf
url = check_url(request.POST.get('url'))
if url:
return success(200, getwaf(url), 'ok')
    return error(400, 'Please enter a valid URL', 'error')
@csrf_exempt
def what_cms(request):
"""
    Fingerprint which framework, CMS and other technologies the current domain uses.
"""
from .plugins.whatcms.whatcms import getwhatcms
url = check_url(request.POST.get('url'))
if url:
result = getwhatcms(url)
MYLOGGER.info('M:' + request.method + ' P:' + request.path + ' UPOST:' + str(
request.POST) + ' SC:200 UIP:' + getuserip(request) + ' RDATA:' + str(result))
return success(200, result, 'ok')
    return error(400, 'Please enter a valid URL', 'error')
@csrf_exempt
def _subdomain(request):
    '''Subdomain enumeration.'''
from .plugins.subdomain.subdomain import get_subdomain
domain = request.POST.get('domain')
print(domain)
if domain:
result = get_subdomain(domain)
print(len(result))
MYLOGGER.info('M:' + request.method + ' P:' + request.path + ' UPOST:' + str(request.POST) + ' SC:200 UIP:' + getuserip(request) + ' RDATA:' + str(result))
return success(200, result, 'ok')
    return error(400, 'Please enter a valid URL', 'error')
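
if __name__ == '__main__':
    # Hypothetical ad-hoc check, never executed when Django imports this module.
    # Every view above is a CSRF-exempt POST endpoint reading a single form field
    # ('ip', 'url' or 'domain') and answering with the JSON envelope produced by
    # success()/error(). The host and route below are made up (the real routes
    # live in the project's urls.py), and port_scan additionally requires a
    # logged-in session.
    import requests
    reply = requests.post('http://localhost:8000/webscan/baseinfo/',
                          data={'url': 'http://example.com'})
    print(reply.json())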
| 32.902597
| 164
| 0.610026
|
dfce96b8227f698c658b065374b735f3ecdb0d7f
| 2,844
|
py
|
Python
|
src/hexagonal/services/raw_python_project_importer.py
|
rfrezino/hexagonal-sanity-check
|
78c8711d9be6ec173abead4ab344f7ac57d5d4ac
|
[
"MIT"
] | 1
|
2022-03-14T10:17:38.000Z
|
2022-03-14T10:17:38.000Z
|
src/hexagonal/services/raw_python_project_importer.py
|
rfrezino/hexagonal-sanity-check
|
78c8711d9be6ec173abead4ab344f7ac57d5d4ac
|
[
"MIT"
] | null | null | null |
src/hexagonal/services/raw_python_project_importer.py
|
rfrezino/hexagonal-sanity-check
|
78c8711d9be6ec173abead4ab344f7ac57d5d4ac
|
[
"MIT"
] | 2
|
2021-12-14T10:35:24.000Z
|
2022-01-31T14:17:36.000Z
|
import os
from glob import glob
from typing import List
from hexagonal.domain.raw_python_file import RawPythonFile
from hexagonal.services.hexagonal_composition import HexagonalComposition
from hexagonal.services.raw_python_file_builder import RawPythonFileBuilder
class RawPythonFilesImporter:
_composition: HexagonalComposition
_source_folder_full_path: str
_excluded_folders: List[str]
@property
def source_folder_full_path(self):
return self._source_folder_full_path
def __init__(self, source_folder_full_path: str, hexagonal_composition: HexagonalComposition,
excluded_folders: List[str]):
if not os.path.isabs(source_folder_full_path):
raise Exception("The param source_folder_full_path must have the source's folder full path.")
if not os.path.isdir(source_folder_full_path):
raise Exception('Source folder not found.')
if not excluded_folders:
self._excluded_folders = []
else:
self._excluded_folders = excluded_folders
self._source_folder_full_path = source_folder_full_path
self._composition = hexagonal_composition
def import_raw_python_files(self) -> List[RawPythonFile]:
project_files_paths = self._get_all_python_files_paths_from_source_folder()
python_project_files = self._convert_files_paths_in_python_project_files(python_files_paths=project_files_paths)
return python_project_files
def _get_all_python_files_paths_from_source_folder(self) -> List[str]:
all_files = [os.path.abspath(y) for x in os.walk(self._source_folder_full_path)
for y in glob(os.path.join(x[0], '*.py'))]
result = []
for file in all_files:
include_file = True
if f'{os.sep}.' in file:
continue
for excluded_dir in self._excluded_folders:
file_relative_path = file.replace(self._source_folder_full_path, '')
if file_relative_path.startswith(excluded_dir):
include_file = False
break
if include_file:
result.append(file)
return result
def _convert_files_paths_in_python_project_files(self, python_files_paths: List[str]) -> List[RawPythonFile]:
result = []
for python_files_path in python_files_paths:
result.append(self._convert_file_path_in_python_project_file(python_file_path=python_files_path))
return result
def _convert_file_path_in_python_project_file(self, python_file_path: str) -> RawPythonFile:
builder = RawPythonFileBuilder(file_full_path=python_file_path,
project_source_folder_full_path=self._source_folder_full_path)
return builder.build()
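
# --- Stand-alone sketch (not used by the class above; folder names made up) ----
# The same discovery rule in plain form: walk a source tree, keep *.py files,
# skip anything under a hidden directory, and drop paths whose source-relative
# prefix matches an excluded folder.
def _find_python_files(source_root: str, excluded_folders: List[str]) -> List[str]:
    candidates = [os.path.abspath(y) for x in os.walk(source_root)
                  for y in glob(os.path.join(x[0], '*.py'))]
    found = []
    for file_path in candidates:
        if f'{os.sep}.' in file_path:
            continue  # hidden directory somewhere in the path
        relative = file_path.replace(source_root, '')
        if any(relative.startswith(prefix) for prefix in excluded_folders):
            continue
        found.append(file_path)
    return found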
| 38.958904
| 120
| 0.701828
|
80cb853821050314a843d25e8e8e6e9e41411d5c
| 965
|
py
|
Python
|
huobi/model/mbprequest.py
|
xiaohuid/huobi_Python
|
ebc84b2fc560f77fd77457f36ff91906c43646e3
|
[
"Apache-2.0"
] | 1
|
2020-12-28T07:04:45.000Z
|
2020-12-28T07:04:45.000Z
|
huobi/model/mbprequest.py
|
xiaohuid/huobi_Python
|
ebc84b2fc560f77fd77457f36ff91906c43646e3
|
[
"Apache-2.0"
] | null | null | null |
huobi/model/mbprequest.py
|
xiaohuid/huobi_Python
|
ebc84b2fc560f77fd77457f36ff91906c43646e3
|
[
"Apache-2.0"
] | 1
|
2022-03-27T10:36:04.000Z
|
2022-03-27T10:36:04.000Z
|
from huobi.constant.result import OutputKey
from huobi.impl.utils.channelparser import ChannelParser
from huobi.model import *
class MbpRequest:
"""
    Market-by-price (MBP) depth data returned in reply to an MBP request.
    :member
        symbol: The symbol that was requested (parsed from the reply channel).
        rep: The reply channel string.
        id: The request id echoed back by the server.
        data: The price depth (an Mbp instance).
"""
def __init__(self):
self.symbol = ""
self.rep = ""
self.id = ""
self.data = PriceDepth()
@staticmethod
def json_parse(json_wrapper):
rep = json_wrapper.get_string(OutputKey.KeyChannelRep)
parse = ChannelParser(rep)
mbp_event = MbpRequest()
mbp_event.symbol = parse.symbol
mbp_event.id = json_wrapper.get_int("id")
mbp_event.rep = rep
data = json_wrapper.get_object(OutputKey.KeyData)
mbp = Mbp.json_parse(data)
mbp_event.data = mbp
return mbp_event
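
# For orientation only (values made up): the reply fed to json_parse() above, via
# the SDK's json wrapper, looks roughly like the dict below. 'rep' carries the
# reply channel (from which the symbol is parsed), 'id' echoes the request id,
# and 'data' holds the order-book payload handed to Mbp.json_parse().
_EXAMPLE_MBP_REPLY = {
    "id": 123,
    "rep": "market.btcusdt.mbp.150",
    "data": {},  # bids/asks omitted in this sketch
}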
| 26.081081
| 75
| 0.645596
|
e6cd3f473e84bccb38060ab2d97b66fee5917e66
| 3,422
|
py
|
Python
|
monitoring/tracer/check_rid_flights.py
|
rpai1/dss
|
79d8110c336851b155a6e5417692ec68b70c0c07
|
[
"Apache-2.0"
] | null | null | null |
monitoring/tracer/check_rid_flights.py
|
rpai1/dss
|
79d8110c336851b155a6e5417692ec68b70c0c07
|
[
"Apache-2.0"
] | null | null | null |
monitoring/tracer/check_rid_flights.py
|
rpai1/dss
|
79d8110c336851b155a6e5417692ec68b70c0c07
|
[
"Apache-2.0"
] | null | null | null |
#!env/bin/python3
import argparse
import logging
from typing import Dict
import requests
import s2sphere
import yaml
from monitoring.monitorlib import rid
from monitoring.tracer import polling
from monitoring.tracer.resources import ResourceSet
logging.basicConfig()
_logger = logging.getLogger('check_rid_flights')
_logger.setLevel(logging.DEBUG)
def _json_or_error(resp: requests.Response) -> Dict:
try:
json = resp.json()
except ValueError:
json = None
if resp == 200 and json:
return json
else:
info = {
'request': {
'url': resp.request.url,
'Authorization': resp.request.headers.get('Authorization', '<None>'),
},
'response': {
'code': resp.status_code,
'elapsed': resp.elapsed.total_seconds()
}
}
if json is None:
info['response']['body'] = resp.content
else:
info['response']['json'] = json
return info
def get_flights(resources: ResourceSet, flights_url: str, area: s2sphere.LatLngRect, include_recent_positions: bool) -> Dict:
resp = resources.dss_client.get(flights_url, params={
'view': '{},{},{},{}'.format(
area.lat_lo().degrees,
area.lng_lo().degrees,
area.lat_hi().degrees,
area.lng_hi().degrees,
),
'include_recent_positions': 'true' if include_recent_positions else 'false',
}, scope=rid.SCOPE_READ)
return _json_or_error(resp)
def get_flight_details(resources: ResourceSet, flights_url: str, id: str) -> Dict:
resp = resources.dss_client.get(flights_url + '/{}/details'.format(id), scope=rid.SCOPE_READ)
return _json_or_error(resp)
def get_all_flights(resources: ResourceSet, area: s2sphere.LatLngRect, include_recent_positions: bool) -> Dict:
isa_result = polling.poll_rid_isas(resources, area)
if not isa_result.success:
return {
'error': {
'description': 'Failed to obtain ISAs',
'response': isa_result.to_json(),
}
}
if not isa_result.success.objects:
return {
'error': {
'description': 'No ISAs present in requested area',
}
}
result = {}
for isa_id, isa in isa_result.success.objects.items():
flights_url = isa.get('flights_url', None)
if flights_url is None:
result[isa_id] = {'error': {'description': 'Missing flights_url'}}
continue
isa_flights = get_flights(resources, flights_url, area, include_recent_positions)
if 'flights' not in isa_flights['response'].get('json', {}):
isa_flights['description'] = 'Missing flights field'
result[isa_id] = {'error': isa_flights}
continue
for flight in isa_flights['response']['json']['flights']:
flight_id = flight.get('id', None)
if flight_id is None:
flight['details'] = {'error': {'description': 'Missing id field'}}
continue
flight['details'] = get_flight_details(resources, flights_url, flight['id'])
result[isa_id] = isa_flights
return result
def main():
parser = argparse.ArgumentParser()
ResourceSet.add_arguments(parser)
parser.add_argument('--include-recent-positions', action='store_true', default=False, help='If set, request recent positions when polling for flight data')
args = parser.parse_args()
resources = ResourceSet.from_arguments(args)
result = get_all_flights(resources, resources.area, args.include_recent_positions)
print(yaml.dump(result))
if __name__ == "__main__":
main()
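
# --- Illustrative sketch (coordinates made up) ----------------------------------
# How the 'view' query parameter sent by get_flights() is built from an s2sphere
# rectangle: 'lat_lo,lng_lo,lat_hi,lng_hi' in degrees.
def _view_param_sketch() -> str:
    corner_a = s2sphere.LatLng.from_degrees(34.12, -118.40)
    corner_b = s2sphere.LatLng.from_degrees(34.15, -118.35)
    rect = s2sphere.LatLngRect.from_point_pair(corner_a, corner_b)
    return '{},{},{},{}'.format(
        rect.lat_lo().degrees, rect.lng_lo().degrees,
        rect.lat_hi().degrees, rect.lng_hi().degrees)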
| 29.5
| 157
| 0.680304
|
150607637b7cd27c4cd7ac06874d377e68159776
| 4,982
|
py
|
Python
|
.ycm_extra_conf.py
|
uasys/ACF-Coalescing-LLVM
|
db811bda15bb7c4a951644130ab55f5cb5848351
|
[
"MIT"
] | 1
|
2020-06-23T00:18:56.000Z
|
2020-06-23T00:18:56.000Z
|
.ycm_extra_conf.py
|
uasys/ACF-Coalescing-LLVM
|
db811bda15bb7c4a951644130ab55f5cb5848351
|
[
"MIT"
] | null | null | null |
.ycm_extra_conf.py
|
uasys/ACF-Coalescing-LLVM
|
db811bda15bb7c4a951644130ab55f5cb5848351
|
[
"MIT"
] | null | null | null |
# Generated by YCM Generator at 2017-01-06 11:15:26.344080
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
flags = [
'-x',
'c++',
'-DGpuAnalysis_EXPORTS',
'-D_GNU_SOURCE',
'-D__STDC_CONSTANT_MACROS',
'-D__STDC_FORMAT_MACROS',
'-D__STDC_LIMIT_MACROS',
'-I/home/taylor/dev/llvm/include',
'-I/home/taylor/git/coalescingsa-llvm/wali-opennwa-4.1/Source',
'-I/home/taylor/git/llvm/include',
'-std=gnu++11',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.C', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.H', '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
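
if __name__ == '__main__':
    # Ad-hoc demonstration (paths made up); YCM never runs this branch. It shows
    # how MakeRelativePathsInFlagsAbsolute rebases relative include paths onto the
    # given working directory.
    print(MakeRelativePathsInFlagsAbsolute(
        ['-I', 'include', '-Iexternal', '-std=gnu++11'], '/project'))
    # -> ['-I', '/project/include', '-I/project/external', '-std=gnu++11']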
| 33.891156
| 79
| 0.718386
|
4fabbc336161434c7f78966242944eb239c20406
| 5,729
|
py
|
Python
|
init.py
|
lccatala/ENet-Real-Time-Semantic-Segmentation
|
e8efbfa8232000baf78910b9b91282f1ee193902
|
[
"BSD-3-Clause"
] | 268
|
2019-01-17T09:19:52.000Z
|
2022-02-17T05:19:21.000Z
|
init.py
|
lccatala/ENet-Real-Time-Semantic-Segmentation
|
e8efbfa8232000baf78910b9b91282f1ee193902
|
[
"BSD-3-Clause"
] | 25
|
2019-01-24T08:15:06.000Z
|
2021-06-24T21:44:50.000Z
|
init.py
|
lccatala/ENet-Real-Time-Semantic-Segmentation
|
e8efbfa8232000baf78910b9b91282f1ee193902
|
[
"BSD-3-Clause"
] | 75
|
2019-01-17T10:43:09.000Z
|
2021-09-29T19:28:02.000Z
|
import numpy as np
import argparse
import torch  # used below via torch.device / torch.cuda; otherwise only available through the star imports
from train import *
from test import *
color_map = {
'unlabeled' : ( 0, 0, 0),
'dynamic' : (111, 74, 0),
'ground' : ( 81, 0, 81),
'road' : (128, 64,128),
'sidewalk' : (244, 35,232),
'parking' : (250,170,160),
'rail track' : (230,150,140),
'building' : ( 70, 70, 70),
'wall' : (102,102,156),
'fence' : (190,153,153),
'guard rail' : (180,165,180),
'bridge' : (150,100,100),
'tunnel' : (150,120, 90),
'pole' : (153,153,153),
'traffic light' : (250,170, 30),
'traffic sign' : (220,220, 0),
'vegetation' : (107,142, 35),
'terrain' : (152,251,152),
'sky' : ( 70,130,180),
'person' : (220, 20, 60),
'rider' : (255, 0, 0),
'car' : ( 0, 0,142),
'truck' : ( 0, 0, 70),
'bus' : ( 0, 60,100),
'caravan' : ( 0, 0, 90),
'trailer' : ( 0, 0,110),
'train' : ( 0, 80,100),
'motorcycle' : ( 0, 0,230),
'bicycle' : (119, 11, 32)
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m',
type=str,
default='./datasets/CamVid/ckpt-camvid-enet.pth',
help='The path to the pretrained enet model')
parser.add_argument('-i', '--image-path',
type=str,
help='The path to the image to perform semantic segmentation')
parser.add_argument('-rh', '--resize-height',
type=int,
default=512,
help='The height for the resized image')
parser.add_argument('-rw', '--resize-width',
type=int,
default=512,
help='The width for the resized image')
parser.add_argument('-lr', '--learning-rate',
type=float,
default=5e-4,
help='The learning rate')
parser.add_argument('-bs', '--batch-size',
type=int,
default=10,
help='The batch size')
parser.add_argument('-wd', '--weight-decay',
type=float,
default=2e-4,
help='The weight decay')
parser.add_argument('-c', '--constant',
type=float,
default=1.02,
help='The constant used for calculating the class weights')
parser.add_argument('-e', '--epochs',
type=int,
default=102,
help='The number of epochs')
parser.add_argument('-nc', '--num-classes',
type=int,
default=12,
help='Number of unique classes')
parser.add_argument('-se', '--save-every',
type=int,
default=10,
help='The number of epochs after which to save a model')
parser.add_argument('-iptr', '--input-path-train',
type=str,
default='./datasets/CamVid/train/',
help='The path to the input dataset')
parser.add_argument('-lptr', '--label-path-train',
type=str,
default='./datasets/CamVid/trainannot/',
help='The path to the label dataset')
parser.add_argument('-ipv', '--input-path-val',
type=str,
default='./datasets/CamVid/val/',
help='The path to the input dataset')
parser.add_argument('-lpv', '--label-path-val',
type=str,
default='./datasets/CamVid/valannot/',
help='The path to the label dataset')
parser.add_argument('-iptt', '--input-path-test',
type=str,
default='./datasets/CamVid/test/',
help='The path to the input dataset')
parser.add_argument('-lptt', '--label-path-test',
type=str,
default='./datasets/CamVid/testannot/',
help='The path to the label dataset')
parser.add_argument('-pe', '--print-every',
type=int,
default=1,
help='The number of epochs after which to print the training loss')
parser.add_argument('-ee', '--eval-every',
type=int,
default=10,
help='The number of epochs after which to print the validation loss')
parser.add_argument('--cuda',
type=bool,
default=False,
help='Whether to use cuda or not')
parser.add_argument('--mode',
choices=['train', 'test'],
default='train',
help='Whether to train or test')
FLAGS, unparsed = parser.parse_known_args()
FLAGS.cuda = torch.device('cuda:0' if torch.cuda.is_available() and FLAGS.cuda \
else 'cpu')
if FLAGS.mode.lower() == 'train':
train(FLAGS)
elif FLAGS.mode.lower() == 'test':
test(FLAGS)
else:
raise RuntimeError('Unknown mode passed. \n Mode passed should be either \
of "train" or "test"')
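
# --- Hedged visualization sketch (never called by this script) ------------------
# One way to turn `color_map` into an index -> RGB palette for colouring a
# predicted HxW class-id mask. It ASSUMES class ids follow the insertion order of
# `color_map`, which this script does not guarantee; align the ordering with the
# dataset's label encoding before relying on it.
def build_palette(cmap=color_map):
    palette = np.zeros((len(cmap), 3), dtype=np.uint8)
    for idx, rgb in enumerate(cmap.values()):
        palette[idx] = rgb
    return palette  # palette[pred_mask] maps an HxW id mask to an HxWx3 RGB image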
| 36.490446
| 93
| 0.440915
|
4282b59f442a5388a7e9404b7377ddcf51c94b32
| 2,283
|
py
|
Python
|
unifuncnet/Fetchers/Gene_Fetchers/Gene_Fetcher.py
|
PedroMTQ/UniFuncNet
|
cde0170241aff8502862df84756662fae7f5d8b3
|
[
"MIT"
] | 4
|
2022-02-14T15:49:31.000Z
|
2022-03-22T10:37:36.000Z
|
unifuncnet/Fetchers/Gene_Fetchers/Gene_Fetcher.py
|
PedroMTQ/UniFuncNet
|
cde0170241aff8502862df84756662fae7f5d8b3
|
[
"MIT"
] | null | null | null |
unifuncnet/Fetchers/Gene_Fetchers/Gene_Fetcher.py
|
PedroMTQ/UniFuncNet
|
cde0170241aff8502862df84756662fae7f5d8b3
|
[
"MIT"
] | null | null | null |
from unifuncnet.Fetchers.Global_Fetcher import *
class Gene_Fetcher(Global_Fetcher):
def __init__(self,gene_id,extra_args={},memory_storage=None):
Global_Fetcher.__init__(self,memory_storage=memory_storage)
self.gene_id=gene_id
self.db= None
self.gene=None
self.convergence_args={}
#if no memory_storage is present from one of the pipelines or previous fetchers we assign it one and initialize the memory
if not self.memory_storage:
from unifuncnet.Searchers.Gene_Searcher import Gene_Searcher
self.memory_storage = Gene_Searcher()
def add_gene(self):
if self.get_gene():
match= self.get_gene_match()
if match:
if match is not self.get_gene():
match.unite_instances(self.get_gene(),always_unite=True)
self.set_gene(match)
else:
self.memory_storage.add_gene(self.get_gene())
def get_gene(self):
return self.gene
def set_gene(self,match_instance):
self.gene=match_instance
def get_gene_match(self):
if self.get_gene():
return self.memory_storage.get_biological_instance('genes',self.get_gene())
else:
return self.memory_storage.get_biological_instance('genes',self.gene_id,self.db)
def find_protein(self,query_id=None,extra_args={},convergence_search=False):
memory_type=get_instance_type(self.memory_storage)
if memory_type=='Protein_Searcher':
return self.memory_storage.run_searcher(bio_db=self.db,bio_query=query_id,extra_args=extra_args,convergence_search=convergence_search)
else:
return self.memory_storage.protein_searcher.run_searcher(bio_db=self.db,bio_query=query_id,extra_args=extra_args,convergence_search=convergence_search)
def find_reaction(self,query_id=None,extra_args={}):
memory_type=get_instance_type(self.memory_storage)
if memory_type=='Reaction_Searcher':
return self.memory_storage.run_searcher(bio_db=self.db,bio_query=query_id,extra_args=extra_args)
else:
return self.memory_storage.reaction_searcher.run_searcher(bio_db=self.db,bio_query=query_id,extra_args=extra_args)
| 42.277778
| 163
| 0.700394
|
4bfaac8ddefcae9436f814ecc6c2419a61946633
| 7,213
|
py
|
Python
|
src/multi_label.py
|
nagmakhan/multi-label-analysis
|
4d2a16e39c520a2a519e9f1827feece7408d3225
|
[
"MIT"
] | 5
|
2020-07-22T09:19:05.000Z
|
2021-10-11T12:58:52.000Z
|
src/multi_label.py
|
nagmakhan/multi-label-analysis
|
4d2a16e39c520a2a519e9f1827feece7408d3225
|
[
"MIT"
] | 3
|
2020-09-24T03:22:00.000Z
|
2021-05-15T05:42:15.000Z
|
src/multi_label.py
|
nagmakhan/multi-label-analysis
|
4d2a16e39c520a2a519e9f1827feece7408d3225
|
[
"MIT"
] | 2
|
2020-03-23T13:29:53.000Z
|
2020-04-01T13:23:34.000Z
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import hamming_loss, fbeta_score, confusion_matrix, precision_recall_fscore_support
from skmultilearn.problem_transform import BinaryRelevance # to help stratify while sampling
import scipy.io
import numpy as np
import time
## Loading data
print("Loading data")
# Raw features - do data pre-process
features = scipy.io.loadmat('dataset/features.mat')
features = features['features']
features = features['val']
features = features[0]
# labels
labels = scipy.io.loadmat('dataset/UCMERCED/multilabels/LandUse_multilabels.mat')
labels = labels['labels']
labels = np.squeeze(np.transpose(labels,(1,0)))
# ## Data pre-process - mean of all node features for feeding to conventional classifiers which cannot deal with graphs
print("Pre-processing data")
graph_size = np.array([s.shape[0] for s in features]).astype(np.int64)
largest_graph = max(graph_size)
features_mat = np.zeros((np.shape(features)[0], largest_graph, np.shape(features[0])[1]))
for i in range(np.shape(features)[0]):
features_mat[i,:,:] = np.pad(features[i].astype(np.float32), ((0,largest_graph-features[i].shape[0]), (0, 0)), 'constant', constant_values=(0))
features = np.mean(features_mat, axis=1) #final mean features
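# (The two steps above zero-pad every graph to the size of the largest one and then average
# over the node axis, i.e. a simple mean-pooling readout that yields one fixed-length
# feature vector per sample.)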
## Analysis for GCN
print('Analysis for GCN..')
index = scipy.io.loadmat('test_train_idx.mat')
feat = scipy.io.loadmat('gcn_features.mat')
predictions = feat['pred_labels']
train_ind = np.squeeze(index['train_ind'])
test_ind = np.squeeze(index['test_ind'])
val_ind = np.squeeze(index['val_ind'])
X_test = features[test_ind,:]
y_test = labels[test_ind, :]
print(np.sum(labels,axis=0))
print (np.sum(predictions,axis=0))
# print y_test
predictions = np.squeeze(predictions[test_ind,:])
print (np.sum(predictions,axis=0))
# print predictions
for i in range(0,17):
print(confusion_matrix(y_test[:,i], predictions[:,i]))
# # # score calculation
print("Score calculation..")
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='macro')
print("Precision macro:%f" % precision)
print("Recall macro:%f" % recall)
print("F-score macro:%f" % fscore)
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='samples')
print("Precision samples:%f" % precision)
print("Recall samples:%f" % recall)
print("F-score samples:%f" % fscore)
# using same index set for train, test and val as GCN for the other classifiers
X_train = features[train_ind,:]
y_train = labels[train_ind, :]
X_val = features[val_ind,:]
y_val = labels[val_ind, :]
X_test = features[test_ind,:]
y_test = labels[test_ind, :]
## Multi-label classification
## Using KNN
from skmultilearn.adapt import MLkNN
# for last-layer, k=13
classifier = MLkNN(k=13) #tuned
# train
start_at = time.time()
print("Training classifier KNN")
classifier.fit(X_train, y_train)
print("Training finished in time %g seconds" % (time.time()-start_at))
# # # predict
print("Predicting")
predictions = classifier.predict(X_test)
predictions = predictions.todense()
predictions_val = classifier.predict(X_val)
predictions_val = predictions_val.todense()
predictions_train = classifier.predict(X_train)
predictions_train = predictions_train.todense()
predictions_all = np.vstack((predictions, predictions_val, predictions_train))
prob_test = classifier.predict_proba(X_test)
prob_test = prob_test.todense()
prob_val = classifier.predict_proba(X_val)
prob_val = prob_val.todense()
prob_train = classifier.predict_proba(X_train)
prob_train = prob_train.todense()
prob = np.vstack((prob_test, prob_val, prob_train))
# # # score calculation
print("Score calculation..")
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='macro')
print("Precision macro:%f" % precision)
print("Recall macro:%f" % recall)
print("F-score macro:%f" % fscore)
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='samples')
print("Precision samples:%f" % precision)
print("Recall samples:%f" % recall)
print("F-score samples:%f" % fscore)
# ## Using Gaussian NB
from skmultilearn.problem_transform import LabelPowerset
from sklearn.naive_bayes import GaussianNB
# initialize Binary Relevance multi-label classifier
# with a gaussian naive bayes base classifier
classifier = BinaryRelevance(GaussianNB())
# train
start_at=time.time()
print("Training classifier gaussian NB")
classifier.fit(X_train, y_train)
print("Training finished in time %g seconds" % (time.time()-start_at))
# predict
print("Predicting")
predictions = classifier.predict(X_test)
predictions = predictions.todense()
predictions_val = classifier.predict(X_val)
predictions_val = predictions_val.todense()
predictions_train = classifier.predict(X_train)
predictions_train = predictions_train.todense()
predictions_all = np.vstack((predictions, predictions_val, predictions_train))
prob_test = classifier.predict_proba(X_test)
prob_test = prob_test.todense()
prob_val = classifier.predict_proba(X_val)
prob_val = prob_val.todense()
prob_train = classifier.predict_proba(X_train)
prob_train = prob_train.todense()
prob = np.vstack((prob_test, prob_val, prob_train))
# score calculation
print("Score calculation..")
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='macro')
print("Precision macro:%f" % precision)
print("Recall macro:%f" % recall)
print("F-score macro:%f" % fscore)
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='samples')
print("Precision samples:%f" % precision)
print("Recall samples:%f" % recall)
print("F-score samples:%f" % fscore)
from sklearn.svm import SVC
classifier = BinaryRelevance(classifier = SVC(C=2.2, probability=True))
# train
start_at = time.time()
print("Training classifier SVC with Binary Relevance")
classifier.fit(X_train, y_train)
print("Training finished in time %g seconds" % (time.time()-start_at))
# predict
print("Predicting")
predictions = classifier.predict(X_test)
predictions = predictions.todense()
predictions_val = classifier.predict(X_val)
predictions_val = predictions_val.todense()
predictions_train = classifier.predict(X_train)
predictions_train = predictions_train.todense()
predictions_all = np.vstack((predictions, predictions_val, predictions_train))
prob_test = classifier.predict_proba(X_test)
prob_test = prob_test.todense()
prob_val = classifier.predict_proba(X_val)
prob_val = prob_val.todense()
prob_train = classifier.predict_proba(X_train)
prob_train = prob_train.todense()
prob = np.vstack((prob_test, prob_val, prob_train))
# score calculation
print("Score calculation..")
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='macro')
print("Precision macro:%f" % precision)
print("Recall macro:%f" % recall)
print("F-score macro:%f" % fscore)
precision, recall, fscore, _ = precision_recall_fscore_support(y_test,predictions ,average='samples')
print("Precision samples:%f" % precision)
print("Recall samples:%f" % recall)
print("F-score samples:%f" % fscore)
| 32.638009
| 147
| 0.761126
|
bd6d1c50086b0cf0eaff55e620c563640d7b9ff6
| 14,599
|
py
|
Python
|
frameworks/helloworld/tests/test_canary_strategy.py
|
jorgelopez1/hdfs
|
892589180438b90486ec7530d2a63c218b20e79f
|
[
"Apache-2.0"
] | null | null | null |
frameworks/helloworld/tests/test_canary_strategy.py
|
jorgelopez1/hdfs
|
892589180438b90486ec7530d2a63c218b20e79f
|
[
"Apache-2.0"
] | null | null | null |
frameworks/helloworld/tests/test_canary_strategy.py
|
jorgelopez1/hdfs
|
892589180438b90486ec7530d2a63c218b20e79f
|
[
"Apache-2.0"
] | null | null | null |
import logging
import pytest
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_plan
import sdk_tasks
import sdk_utils
import shakedown
from tests import config
log = logging.getLogger(__name__)
# global pytest variable applicable to whole module
pytestmark = sdk_utils.dcos_1_9_or_higher
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
# due to canary: no tasks should launch, and suppressed shouldn't be set
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
0,
additional_options={
'service': {'spec_file': 'examples/canary.yml'},
'hello': {'count': 4},
'world': {'count': 4}
},
wait_for_deployment=False)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
def test_canary_init():
def fn():
# check for empty list internally rather than returning empty list.
# otherwise shakedown.wait_for() will keep going...
return sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == []
assert shakedown.wait_for(fn, noisy=True, timeout_seconds=10 * 60)
pl = sdk_plan.wait_for_plan_status(config.SERVICE_NAME, 'deploy', 'WAITING')
log.info(pl)
assert pl['status'] == 'WAITING'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'WAITING'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
phase = pl['phases'][1]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'WAITING'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
@pytest.mark.sanity
def test_canary_first():
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy hello-deploy')
expected_tasks = ['hello-0']
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
    # don't rely on the retrying service_plan helper here;
    # once we get here, the plan should always be returned properly
pl = sdk_plan.wait_for_completed_step(config.SERVICE_NAME, 'deploy', 'hello-deploy', 'hello-0:[server]')
log.info(pl)
assert pl['status'] == 'WAITING'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
phase = pl['phases'][1]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'WAITING'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
@pytest.mark.sanity
def test_canary_plan_continue_noop():
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy')
# the plan doesn't have the waiting bit set, so telling it to continue should be a no-op
# (the plan is currently just in WAITING for display purposes)
expected_tasks = ['hello-0']
try:
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks) + 1, timeout_seconds=30)
assert False, "Shouldn't have deployed a second task"
except AssertionError as arg:
raise arg
except:
pass # expected
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
@pytest.mark.sanity
def test_canary_second():
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy world-deploy')
sdk_plan.wait_for_step_status(config.SERVICE_NAME, 'deploy', 'world-deploy', 'world-0:[server]', 'PENDING')
# because the plan strategy is serial, the second phase just clears a wait bit without
# proceeding to launch anything:
expected_tasks = ['hello-0']
try:
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks) + 1, timeout_seconds=30)
assert False, "Shouldn't have deployed a second task"
except AssertionError as arg:
raise arg
except:
pass # expected
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
pl = sdk_plan.get_deployment_plan(config.SERVICE_NAME)
log.info(pl)
assert pl['status'] == 'WAITING'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
phase = pl['phases'][1]
assert phase['status'] == 'PENDING'
steps2 = phase['steps']
    assert len(steps2) == 4
assert steps2[0]['status'] == 'PENDING'
assert steps2[1]['status'] == 'WAITING'
assert steps2[2]['status'] == 'PENDING'
assert steps2[3]['status'] == 'PENDING'
@pytest.mark.sanity
def test_canary_third():
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy hello-deploy')
expected_tasks = [
'hello-0', 'hello-1', 'hello-2', 'hello-3',
'world-0']
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
pl = sdk_plan.wait_for_completed_phase(config.SERVICE_NAME, 'deploy', 'hello-deploy')
log.info(pl)
assert pl['status'] == 'WAITING'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
phase = pl['phases'][1]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
@pytest.mark.sanity
def test_canary_fourth():
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy world-deploy')
expected_tasks = [
'hello-0', 'hello-1', 'hello-2', 'hello-3',
'world-0', 'world-1', 'world-2', 'world-3']
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
pl = sdk_plan.wait_for_completed_plan(config.SERVICE_NAME, 'deploy')
log.info(pl)
assert pl['status'] == 'COMPLETE'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
phase = pl['phases'][1]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
@pytest.mark.sanity
def test_increase_count():
sdk_marathon.bump_task_count_config(config.SERVICE_NAME, 'HELLO_COUNT')
expected_tasks = [
'hello-0', 'hello-1', 'hello-2', 'hello-3',
'world-0', 'world-1', 'world-2', 'world-3']
try:
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks) + 1, timeout_seconds=60)
assert False, "Should not start task now"
except AssertionError as arg:
raise arg
except:
pass # expected to fail
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
pl = sdk_plan.wait_for_plan_status(config.SERVICE_NAME, 'deploy', 'WAITING')
log.info(pl)
assert pl['status'] == 'WAITING'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 5
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
assert steps[4]['status'] == 'WAITING'
phase = pl['phases'][1]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy hello-deploy')
expected_tasks = [
'hello-0', 'hello-1', 'hello-2', 'hello-3', 'hello-4',
'world-0', 'world-1', 'world-2', 'world-3']
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
pl = sdk_plan.wait_for_plan_status(config.SERVICE_NAME, 'deploy', 'COMPLETE')
log.info(pl)
assert pl['status'] == 'COMPLETE'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 5
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
assert steps[4]['status'] == 'COMPLETE'
phase = pl['phases'][1]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
@pytest.mark.sanity
def test_increase_cpu():
hello_0_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0-server')
sdk_marathon.bump_cpu_count_config(config.SERVICE_NAME, 'HELLO_CPUS')
pl = sdk_plan.wait_for_plan_status(config.SERVICE_NAME, 'deploy', 'WAITING')
log.info(pl)
assert pl['status'] == 'WAITING'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 5
assert steps[0]['status'] == 'WAITING'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
assert steps[4]['status'] == 'PENDING'
phase = pl['phases'][1]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
# check that all prior tasks are still running, no changes yet
expected_tasks = [
'hello-0', 'hello-1', 'hello-2', 'hello-3', 'hello-4',
'world-0', 'world-1', 'world-2', 'world-3']
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod list', json=True) == expected_tasks
assert hello_0_ids == sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0-server')
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy hello-deploy')
sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0-server', hello_0_ids)
sdk_tasks.check_running(config.SERVICE_NAME, len(expected_tasks))
pl = sdk_plan.wait_for_step_status(config.SERVICE_NAME, 'deploy', 'hello-deploy', 'hello-0:[server]', 'COMPLETE')
log.info(pl)
assert pl['status'] == 'WAITING'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'WAITING'
steps = phase['steps']
assert len(steps) == 5
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'WAITING'
assert steps[2]['status'] == 'PENDING'
assert steps[3]['status'] == 'PENDING'
assert steps[4]['status'] == 'PENDING'
phase = pl['phases'][1]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
hello_1_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-1-server')
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan continue deploy hello-deploy')
sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-1-server', hello_1_ids)
pl = sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
log.info(pl)
assert pl['status'] == 'COMPLETE'
assert len(pl['phases']) == 2
phase = pl['phases'][0]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 5
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
assert steps[4]['status'] == 'COMPLETE'
phase = pl['phases'][1]
assert phase['status'] == 'COMPLETE'
steps = phase['steps']
assert len(steps) == 4
assert steps[0]['status'] == 'COMPLETE'
assert steps[1]['status'] == 'COMPLETE'
assert steps[2]['status'] == 'COMPLETE'
assert steps[3]['status'] == 'COMPLETE'
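# Sketch (not used by the tests above): the repeated phase/step status assertions could be
# collapsed into a helper along these lines.
def assert_phase(plan, phase_index, phase_status, step_statuses):
    phase = plan['phases'][phase_index]
    assert phase['status'] == phase_status
    steps = phase['steps']
    assert len(steps) == len(step_statuses)
    for step, expected in zip(steps, step_statuses):
        assert step['status'] == expected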
| 34.513002
| 117
| 0.646551
|
87ee46b5937e58d5aba854d6f628d4794eeb86cb
| 11,374
|
py
|
Python
|
otcextensions/sdk/dcaas/v2/_proxy.py
|
artem-lifshits/python-otcextensions
|
2021da124f393e0429dd5913a3bc635e6143ba1e
|
[
"Apache-2.0"
] | null | null | null |
otcextensions/sdk/dcaas/v2/_proxy.py
|
artem-lifshits/python-otcextensions
|
2021da124f393e0429dd5913a3bc635e6143ba1e
|
[
"Apache-2.0"
] | null | null | null |
otcextensions/sdk/dcaas/v2/_proxy.py
|
artem-lifshits/python-otcextensions
|
2021da124f393e0429dd5913a3bc635e6143ba1e
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import proxy
from otcextensions.sdk.dcaas.v2 import virtual_gateway as _virtual_gateway
from otcextensions.sdk.dcaas.v2 import connection as _connection
from otcextensions.sdk.dcaas.v2 import virtual_interface as _virtual_interface
class Proxy(proxy.Proxy):
skip_discovery = True
# ======== Virtual gateways ========
def virtual_gateways(self, **query):
"""Retrieve a generator of virtual gateways
:returns: A generator of virtual gateways
:class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.VirtualGateway`
instances
"""
return self._list(_virtual_gateway.VirtualGateway, **query)
def create_virtual_gateway(self, **attrs):
"""Create a new virtual gateway from attributes
:param dict attrs: Keyword arguments which will be used to create a
:class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.VirtualGateway`,
comprised of the properties on the VirtualGateway class.
:returns: The results of virtual gateway creation
:rtype: :class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.
VirtualGateway`
"""
return self._create(_virtual_gateway.VirtualGateway,
prepend_key=False, **attrs)
def get_virtual_gateway(self, virtual_gateway):
"""Get a virtual_gateway
:param virtual_gateway: The value can be the ID of a virtual_gateway
or a :class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.
VirtualGateway` instance.
:returns: Virtual gateway instance
        :rtype: :class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.
VirtualGateway`
"""
return self._get(_virtual_gateway.VirtualGateway, virtual_gateway)
def delete_virtual_gateway(self, virtual_gateway, ignore_missing=True):
"""Delete a virtual_gateway
:param virtual_gateway: The value can be the ID of a virtual gateway
or a :class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.
VirtualGateway` instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the virtual gateway does not exist.
When set to ``True``, no exception will be set when attempting to
            delete a nonexistent virtual gateway.
:returns: `None`
"""
return self._delete(_virtual_gateway.VirtualGateway, virtual_gateway,
ignore_missing=ignore_missing)
def update_virtual_gateway(self, virtual_gateway, **attrs):
"""Update virtual gateway attributes
:param virtual_gateway: The id or an instance of
:class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.VirtualGateway`
:param dict attrs: attributes for update on
:class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.VirtualGateway`
:rtype: :class:`~otcextensions.sdk.dcaas.v2.virtual_gateway.
VirtualGateway`
"""
return self._update(_virtual_gateway.VirtualGateway,
virtual_gateway, **attrs)
def find_virtual_gateway(self, name_or_id, ignore_missing=True, **attrs):
"""Find a single virtual gateway
:param name_or_id: The name or ID of a virtual gateway
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the virtual gateway does not exist.
When set to ``True``, no exception will be set when attempting
to delete a nonexistent virtual gateway.
:returns: ``None``
"""
return self._find(_virtual_gateway.VirtualGateway, name_or_id,
ignore_missing=ignore_missing,
**attrs)
# ======== Connections ========
def connections(self, **query):
"""Retrieve a generator of connections
:returns: A generator of connections
:class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
instances
"""
return self._list(_connection.Connection, **query)
def create_connection(self, **attrs):
"""Create a new connection from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~otcextensions.sdk.dcaas.v2.connection.Connection`,
comprised of the properties on the Connection class.
:returns: The results of connection creation
:rtype: :class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
"""
return self._create(_connection.Connection, prepend_key=False, **attrs)
def get_connection(self, connection):
"""Get a connection
:param connection: The value can be the ID of a connection
or a :class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
instance.
:returns: Connection instance
:rtype: :class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
"""
return self._get(_connection.Connection, connection)
def delete_connection(self, connection, ignore_missing=True):
"""Delete a connection
:param connection: The value can be the ID of a connection
or a :class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the connection does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent connection.
:returns: `None`
"""
return self._delete(_connection.Connection, connection,
ignore_missing=ignore_missing)
def update_connection(self, connection, **attrs):
"""Update connection attributes
:param connection: The id or an instance of
:class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
:param dict attrs: attributes for update on
:class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
:rtype:
:class:`~otcextensions.sdk.dcaas.v2.connection.Connection`
"""
return self._update(_connection.Connection, connection, **attrs)
def find_connection(self, name_or_id, ignore_missing=True, **attrs):
"""Find a single connection
:param name_or_id: The name or ID of a connection
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the connection does not exist.
When set to ``True``, no exception will be set when attempting
to delete a nonexistent connection.
:returns: ``None``
"""
return self._find(_connection.Connection, name_or_id,
ignore_missing=ignore_missing,
**attrs)
# ======== Virtual interface ========
def virtual_interfaces(self, **query):
"""Retrieve a generator of virtual interfaces
:returns: A generator of virtual interfaces
:class:`~otcextensions.sdk.dcaas.v2.virtual_interface.
VirtualInterface` instances
"""
return self._list(_virtual_interface.VirtualInterface, **query)
def create_virtual_interface(self, **attrs):
"""Create a new virtual interface from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~otcextensions.sdk.dcaas.v2.virtual_interface.
VirtualInterface`, comprised of the properties on the Connection
class.
:returns: The results of virtual interface creation
:rtype: :class:`~otcextensions.sdk.dcaas.v2.virtual_interface.
VirtualInterface`
"""
return self._create(_virtual_interface.VirtualInterface,
prepend_key=False, **attrs)
def get_virtual_interface(self, virtual_interface):
"""Get a virtual_interface
:param virtual_interface: The value can be the ID of a
virtual_interface or a :class:`~otcextensions.sdk.dcaas.v2.
virtual_interface.VirtualInterface` instance.
:returns: Virtual interface instance
:rtype:
:class:`~otcextensions.sdk.dcaas.v2.virtual_interface.
VirtualInterface`
"""
return self._get(_virtual_interface.VirtualInterface,
virtual_interface)
def delete_virtual_interface(self, virtual_interface, ignore_missing=True):
"""Delete a virtual interface
:param virtual_interface: The value can be the ID of a virtual
interface or a :class:`~otcextensions.sdk.dcaas.v2.
virtual_interface.VirtualInterface` instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the virtual interface does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent virtual interface.
:returns: `None`
"""
return self._delete(_virtual_interface.VirtualInterface,
virtual_interface, ignore_missing=ignore_missing)
def update_virtual_interface(self, virtual_interface, **attrs):
"""Update virtual interface attributes
:param virtual_interface: The id or an instance of
:class:`~otcextensions.sdk.dcaas.v2.virtual_interface.
VirtualInterface`
:param dict attrs: attributes for update on
:class:`~otcextensions.sdk.dcaas.v2.virtual_interface.
VirtualInterface`
:rtype: :class:`~otcextensions.sdk.dcaas.v2.virtual_interface.
VirtualInterface`
"""
return self._update(_virtual_interface.VirtualInterface,
virtual_interface, **attrs)
def find_virtual_interface(self, name_or_id, ignore_missing=True, **attrs):
"""Find a single virtual interface
:param name_or_id: The name or ID of a virtual interface
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the virtual interface does not exist.
When set to ``True``, no exception will be set when attempting
to delete a nonexistent virtual interface.
:returns: ``None``
"""
return self._find(_virtual_interface.VirtualInterface, name_or_id,
ignore_missing=ignore_missing,
**attrs)
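# Usage sketch (assumptions: an openstack.connection.Connection with the OTC extensions
# registered, e.g. via otcextensions.sdk.register_otc_extensions(conn), and 'dcaas' as the
# service attribute name):
#   for gateway in conn.dcaas.virtual_gateways():
#       print(gateway.id, gateway.name)
#   gw = conn.dcaas.find_virtual_gateway('my-gateway', ignore_missing=True)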
| 42.125926
| 80
| 0.649815
|
5ff49ceb29e27809383b538b93194ee8ddfb9b58
| 11,374
|
py
|
Python
|
dbsync/client/pull.py
|
bintlabs/python-sync-db
|
bb23d77abf560793696f906e030950aec04c3361
|
[
"MIT"
] | 42
|
2015-02-15T05:37:08.000Z
|
2022-02-09T04:19:43.000Z
|
dbsync/client/pull.py
|
bintlabs/python-sync-db
|
bb23d77abf560793696f906e030950aec04c3361
|
[
"MIT"
] | 1
|
2017-02-22T03:01:43.000Z
|
2020-09-27T02:25:16.000Z
|
dbsync/client/pull.py
|
bintlabs/python-sync-db
|
bb23d77abf560793696f906e030950aec04c3361
|
[
"MIT"
] | 9
|
2016-10-27T10:32:13.000Z
|
2020-12-02T06:40:15.000Z
|
"""
Pull, merge and related operations.
"""
import collections
from sqlalchemy.orm import make_transient
from dbsync.lang import *
from dbsync.utils import class_mapper, get_pk, query_model
from dbsync import core
from dbsync.models import Operation
from dbsync import dialects
from dbsync.messages.pull import PullMessage, PullRequestMessage
from dbsync.client.compression import compress, compressed_operations
from dbsync.client.conflicts import (
get_related_tables,
get_fks,
find_direct_conflicts,
find_dependency_conflicts,
find_reversed_dependency_conflicts,
find_insert_conflicts,
find_unique_conflicts)
from dbsync.client.net import post_request
# Utilities specific to the merge
def max_local(model, session):
"""
Returns the maximum value for the primary key of the given model
in the local database.
"""
if model is None:
raise ValueError("null model given to max_local query")
return dialects.max_local(model, session)
def max_remote(model, container):
"""
Returns the maximum value for the primary key of the given model
in the container.
"""
return max(getattr(obj, get_pk(obj)) for obj in container.query(model))
def update_local_id(old_id, new_id, model, session):
"""
Updates the tuple matching *old_id* with *new_id*, and updates all
dependent tuples in other tables as well.
"""
    # Updating either the tuple or the dependent tuples first would
    # cause integrity violations only if the transaction is flushed in
    # between; as long as there is no intermediate flush, the order
    # doesn't matter.
if model is None:
raise ValueError("null model given to update_local_id subtransaction")
# must load fully, don't know yet why
obj = query_model(session, model).\
filter_by(**{get_pk(model): old_id}).first()
setattr(obj, get_pk(model), new_id)
# Then the dependent ones
related_tables = get_related_tables(model)
mapped_fks = ifilter(
lambda (m, fks): m is not None and fks,
[(core.synched_models.tables.get(t.name, core.null_model).model,
get_fks(t, class_mapper(model).mapped_table))
for t in related_tables])
for model, fks in mapped_fks:
for fk in fks:
for obj in query_model(session, model).filter_by(**{fk: old_id}):
setattr(obj, fk, new_id)
session.flush() # raise integrity errors now
UniqueConstraintErrorEntry = collections.namedtuple(
'UniqueConstraintErrorEntry',
'model pk columns')
class UniqueConstraintError(Exception):
entries = None
def __init__(self, entries):
entries = map(partial(apply, UniqueConstraintErrorEntry, ()), entries)
super(UniqueConstraintError, self).__init__(entries)
self.entries = entries
def __repr__(self):
if not self.entries: return u"<UniqueConstraintError - empty>"
return u"<UniqueConstraintError - {0}>".format(
u"; ".join(
u"{0} pk {1} columns ({2})".format(
entry.model.__name__,
entry.pk,
u", ".join(entry.columns))
for entry in self.entries))
def __str__(self): return repr(self)
@core.with_transaction()
def merge(pull_message, session=None):
"""
Merges a message from the server with the local database.
*pull_message* is an instance of dbsync.messages.pull.PullMessage.
"""
if not isinstance(pull_message, PullMessage):
raise TypeError("need an instance of dbsync.messages.pull.PullMessage "
"to perform the local merge operation")
valid_cts = set(ct for ct in core.synched_models.ids)
unversioned_ops = compress(session=session)
pull_ops = filter(attr('content_type_id').in_(valid_cts),
pull_message.operations)
pull_ops = compressed_operations(pull_ops)
# I) first phase: resolve unique constraint conflicts if
# possible. Abort early if a human error is detected
unique_conflicts, unique_errors = find_unique_conflicts(
pull_ops, unversioned_ops, pull_message, session)
if unique_errors:
raise UniqueConstraintError(unique_errors)
conflicting_objects = set()
for uc in unique_conflicts:
obj = uc['object']
conflicting_objects.add(obj)
for key, value in izip(uc['columns'], uc['new_values']):
setattr(obj, key, value)
# Resolve potential cyclical conflicts by deleting and reinserting
for obj in conflicting_objects:
make_transient(obj) # remove from session
for model in set(type(obj) for obj in conflicting_objects):
pk_name = get_pk(model)
pks = [getattr(obj, pk_name)
for obj in conflicting_objects
if type(obj) is model]
session.query(model).filter(getattr(model, pk_name).in_(pks)).\
delete(synchronize_session=False) # remove from the database
session.add_all(conflicting_objects) # reinsert them
session.flush()
# II) second phase: detect conflicts between pulled operations and
# unversioned ones
direct_conflicts = find_direct_conflicts(pull_ops, unversioned_ops)
# in which the delete operation is registered on the pull message
dependency_conflicts = find_dependency_conflicts(
pull_ops, unversioned_ops, session)
# in which the delete operation was performed locally
reversed_dependency_conflicts = find_reversed_dependency_conflicts(
pull_ops, unversioned_ops, pull_message)
insert_conflicts = find_insert_conflicts(pull_ops, unversioned_ops)
# III) third phase: perform pull operations, when allowed and
# while resolving conflicts
def extract(op, conflicts):
return [local for remote, local in conflicts if remote is op]
def purgelocal(local):
session.delete(local)
exclude = lambda tup: tup[1] is not local
mfilter(exclude, direct_conflicts)
mfilter(exclude, dependency_conflicts)
mfilter(exclude, reversed_dependency_conflicts)
mfilter(exclude, insert_conflicts)
unversioned_ops.remove(local)
for pull_op in pull_ops:
# flag to control whether the remote operation is free of obstacles
can_perform = True
# flag to detect the early exclusion of a remote operation
reverted = False
# the class of the operation
class_ = pull_op.tracked_model
direct = extract(pull_op, direct_conflicts)
if direct:
if pull_op.command == 'd':
can_perform = False
for local in direct:
pair = (pull_op.command, local.command)
if pair == ('u', 'u'):
can_perform = False # favor local changes over remote ones
elif pair == ('u', 'd'):
pull_op.command = 'i' # negate the local delete
purgelocal(local)
elif pair == ('d', 'u'):
local.command = 'i' # negate the remote delete
session.flush()
reverted = True
else: # ('d', 'd')
purgelocal(local)
dependency = extract(pull_op, dependency_conflicts)
if dependency and not reverted:
can_perform = False
order = min(op.order for op in unversioned_ops)
# first move all operations further in order, to make way
# for the new one
for op in unversioned_ops:
op.order = op.order + 1
session.flush()
# then create operation to reflect the reinsertion and
# maintain a correct operation history
session.add(Operation(row_id=pull_op.row_id,
content_type_id=pull_op.content_type_id,
command='i',
order=order))
reversed_dependency = extract(pull_op, reversed_dependency_conflicts)
for local in reversed_dependency:
# reinsert record
local.command = 'i'
local.perform(pull_message, session)
# delete trace of deletion
purgelocal(local)
insert = extract(pull_op, insert_conflicts)
for local in insert:
session.flush()
next_id = max(max_remote(class_, pull_message),
max_local(class_, session)) + 1
update_local_id(local.row_id, next_id, class_, session)
local.row_id = next_id
if can_perform:
pull_op.perform(pull_message, session)
session.flush()
# IV) fourth phase: insert versions from the pull_message
for pull_version in pull_message.versions:
session.add(pull_version)
class BadResponseError(Exception):
pass
def pull(pull_url, extra_data=None,
encode=None, decode=None, headers=None, monitor=None, timeout=None,
include_extensions=True):
"""
Attempts a pull from the server. Returns the response body.
Additional data can be passed to the request by giving
*extra_data*, a dictionary of values.
If not interrupted, the pull will perform a local merge. If the
response from the server isn't appropriate, it will raise a
    dbsync.client.pull.BadResponseError.
By default, the *encode* function is ``json.dumps``, the *decode*
function is ``json.loads``, and the *headers* are appropriate HTTP
headers for JSON.
*monitor* should be a routine that receives a dictionary with
information of the state of the request and merge procedure.
*include_extensions* dictates whether the extension functions will
be called during the merge or not. Default is ``True``.
"""
assert isinstance(pull_url, basestring), "pull url must be a string"
assert bool(pull_url), "pull url can't be empty"
if extra_data is not None:
assert isinstance(extra_data, dict), "extra data must be a dictionary"
request_message = PullRequestMessage()
for op in compress(): request_message.add_operation(op)
data = request_message.to_json()
data.update({'extra_data': extra_data or {}})
code, reason, response = post_request(
pull_url, data, encode, decode, headers, timeout, monitor)
if (code // 100 != 2):
if monitor:
monitor({'status': "error", 'reason': reason.lower()})
raise BadResponseError(code, reason, response)
if response is None:
if monitor:
monitor({
'status': "error",
'reason': "invalid response format"})
raise BadResponseError(code, reason, response)
message = None
try:
message = PullMessage(response)
except KeyError:
if monitor:
monitor({
'status': "error",
'reason': "invalid message format"})
raise BadResponseError(
"response object isn't a valid PullMessage", response)
if monitor:
monitor({
'status': "merging",
'operations': len(message.operations)})
merge(message, include_extensions=include_extensions)
if monitor:
monitor({'status': "done"})
# return the response for the programmer to do what she wants
# afterwards
return response
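# Usage sketch (URL, extra data and callback below are illustrative only):
#   def report(state):
#       print(state)  # e.g. {'status': "merging", 'operations': 42}
#   response = pull("http://example.com/sync/pull",
#                   extra_data={'device_id': 1},
#                   monitor=report)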
| 36.690323
| 79
| 0.648497
|
e3e6300ded951bd09be70108f792b14fa4f79846
| 898
|
py
|
Python
|
salt/wheel/key.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | 1
|
2015-10-06T22:25:22.000Z
|
2015-10-06T22:25:22.000Z
|
salt/wheel/key.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | null | null | null |
salt/wheel/key.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | null | null | null |
'''
Wheel system wrapper for key system
'''
# Import salt libs
import salt.key
def list_all():
'''
List the keys under a named status
'''
skey = salt.key.Key(__opts__)
return skey.all_keys()
def accept(match):
'''
Accept keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.accept(match)
def delete(match):
'''
Delete keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.delete(match)
def reject(match):
'''
    Reject keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.reject(match)
def key_str(match):
'''
Return the key strings
'''
skey = salt.key.Key(__opts__)
return skey.key_str(match)
def finger(match):
'''
Return the matching key fingerprints
'''
skey = salt.key.Key(__opts__)
return skey.finger(match)
| 17.96
| 40
| 0.61804
|
9b9e45c6ca0e6e04b6693bec41d0dbd8eac7be93
| 1,911
|
py
|
Python
|
tests/property/lambda_getter.py
|
jpolitz/lambda-py-paper
|
746ef63fc1123714b4adaf78119028afbea7bd76
|
[
"Apache-2.0"
] | 25
|
2015-04-16T04:31:49.000Z
|
2022-03-10T15:53:28.000Z
|
tests/property/lambda_getter.py
|
jpolitz/lambda-py-paper
|
746ef63fc1123714b4adaf78119028afbea7bd76
|
[
"Apache-2.0"
] | 1
|
2018-11-21T22:40:02.000Z
|
2018-11-26T17:53:11.000Z
|
tests/property/lambda_getter.py
|
jpolitz/lambda-py-paper
|
746ef63fc1123714b4adaf78119028afbea7bd76
|
[
"Apache-2.0"
] | 1
|
2021-03-26T03:36:19.000Z
|
2021-03-26T03:36:19.000Z
|
class PropertyBase(Exception):
pass
class PropertyGet(PropertyBase):
pass
class PropertySet(PropertyBase):
pass
class PropertyDel(PropertyBase):
pass
class BaseClass(object):
def __init__(self):
self._spam = 5
@property
def spam(self):
"""BaseClass.getter"""
return self._spam
@spam.setter
def spam(self, value):
self._spam = value
@spam.deleter
def spam(self):
del self._spam
class SubClass(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""SubClass.getter"""
raise PropertyGet(self._spam)
@spam.setter
def spam(self, value):
raise PropertySet(self._spam)
@spam.deleter
def spam(self):
raise PropertyDel(self._spam)
class PropertyDocBase(object):
_spam = 1
def _get_spam(self):
return self._spam
spam = property(_get_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
@PropertyDocBase.spam.getter
def spam(self):
"""The decorator does not use this doc string"""
return self._spam
class PropertySubNewGetter(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""new docstring"""
return 5
class PropertyNewGetter(object):
@property
def spam(self):
"""original docstring"""
return 1
@spam.getter
def spam(self):
"""new docstring"""
return 8
def test_property_decorator_baseclass():
# see #1620
base = BaseClass()
___assertEqual(base.spam, 5)
___assertEqual(base._spam, 5)
base.spam = 10
___assertEqual(base.spam, 10)
___assertEqual(base._spam, 10)
delattr(base, "spam")
___assertTrue(not hasattr(base, "spam"))
___assertTrue(not hasattr(base, "_spam"))
base.spam = 20
___assertEqual(base.spam, 20)
___assertEqual(base._spam, 20)
test_property_decorator_baseclass()
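# Note: the ___assertEqual / ___assertTrue helpers used above are assumed to be provided by
# the lambda-py test harness rather than imported in this file.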
| 20.771739
| 56
| 0.642595
|
19aec18e81c202e38c2a04bec4797820453bd8a0
| 1,493
|
py
|
Python
|
openwater/zone/validation.py
|
jeradM/openwater
|
740b7e76622a1ee909b970d9e5c612a840466cec
|
[
"MIT"
] | null | null | null |
openwater/zone/validation.py
|
jeradM/openwater
|
740b7e76622a1ee909b970d9e5c612a840466cec
|
[
"MIT"
] | null | null | null |
openwater/zone/validation.py
|
jeradM/openwater
|
740b7e76622a1ee909b970d9e5c612a840466cec
|
[
"MIT"
] | null | null | null |
import copy
from typing import Optional, Type, Any, Dict
from cerberus import Validator
from cerberus.errors import ErrorList
from openwater.zone.model import BaseZone
ATTR_SCHEMA = {
"soil_type": {"type": "string"},
"precip_rate": {"type": "float"},
}
RUN_SCHEMA = {
"id": {"type": "integer"},
"zone_id": {"type": "integer"},
"start": {"type": "string", "required": True},
"duration": {"type": "integer", "required": True},
}
ZONE_SCHEMA = {
"id": {"type": "integer"},
"name": {"type": "string", "minlength": 3, "maxlength": 100, "required": True},
"zone_type": {"type": "string", "maxlength": 50, "required": True},
"is_master": {"type": "boolean", "required": True},
"open_offset": {"type": "integer", "nullable": True},
"close_offset": {"type": "integer", "nullable": True},
"attrs": {"type": "dict", "required": True},
"runs": {"type": "list", "required": False, "schema": RUN_SCHEMA},
}
def validate_zone(data: dict) -> Optional["ErrorList"]:
validator = Validator(ZONE_SCHEMA)
if not validator.validate(data):
return validator.errors
return None
def validate_attrs(zone_cls: Type[BaseZone], data: dict) -> Optional["ErrorList"]:
schema: Dict[str, Any] = copy.deepcopy(ATTR_SCHEMA)
if hasattr(zone_cls, "ATTR_SCHEMA"):
schema.update(getattr(zone_cls, "ATTR_SCHEMA"))
validator = Validator(schema)
if not validator.validate(data):
return validator.errors
return None
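# Usage sketch (field values are illustrative only):
#   errors = validate_zone({"name": "Front Lawn", "zone_type": "sprinkler",
#                           "is_master": False, "attrs": {}})
#   # errors is None when the payload satisfies ZONE_SCHEMA, otherwise a dict of messages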
| 31.104167
| 83
| 0.633624
|
d8134ecf11d4f75ec933f8411b9f4ae231d16803
| 2,126
|
py
|
Python
|
.test-infra/jenkins/dependency_check/version_comparer.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 5,279
|
2016-12-29T04:00:44.000Z
|
2022-03-31T22:56:45.000Z
|
.test-infra/jenkins/dependency_check/version_comparer.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 14,149
|
2016-12-28T00:43:50.000Z
|
2022-03-31T23:50:22.000Z
|
.test-infra/jenkins/dependency_check/version_comparer.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 3,763
|
2016-12-29T04:06:10.000Z
|
2022-03-31T22:25:49.000Z
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dependency_check.report_generator_config import ReportGeneratorConfig
def compare_dependency_versions(curr_ver, latest_ver):
"""
Compare the current using version and the latest version.
  Return True if a major version change was found, or if the current version is at least
  MAX_MINOR_VERSION_DIFF minor versions behind the latest.
Args:
curr_ver
latest_ver
Return:
boolean
"""
if curr_ver is None or latest_ver is None:
return True
else:
curr_ver_splitted = curr_ver.split('.')
latest_ver_splitted = latest_ver.split('.')
curr_major_ver = curr_ver_splitted[0]
latest_major_ver = latest_ver_splitted[0]
# compare major versions
if curr_major_ver != latest_major_ver:
return True
# compare minor versions
else:
curr_minor_ver = curr_ver_splitted[1] if len(curr_ver_splitted) > 1 else None
latest_minor_ver = latest_ver_splitted[1] if len(latest_ver_splitted) > 1 else None
if curr_minor_ver is not None and latest_minor_ver is not None:
if (not curr_minor_ver.isdigit() or not latest_minor_ver.isdigit()) and curr_minor_ver != latest_minor_ver:
return True
        elif (curr_minor_ver.isdigit() and latest_minor_ver.isdigit() and
              int(curr_minor_ver) + ReportGeneratorConfig.MAX_MINOR_VERSION_DIFF <= int(latest_minor_ver)):
return True
# TODO: Comparing patch versions if needed.
return False
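# Examples (assuming ReportGeneratorConfig.MAX_MINOR_VERSION_DIFF is 3, as the docstring
# above suggests):
#   compare_dependency_versions('2.3.0', '3.0.0')  -> True   (major version change)
#   compare_dependency_versions('2.3.0', '2.6.0')  -> True   (3 minor versions behind)
#   compare_dependency_versions('2.3.0', '2.5.0')  -> False  (within the allowed gap)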
| 40.884615
| 115
| 0.749294
|
f131b6a60584004f95db454d56f84288153024a6
| 32,263
|
py
|
Python
|
tests/test_bifacial_radiance.py
|
kperrynrel/bifacial_radiance
|
cf5ae46b4ef93990e3e1619956a186376cb4fd8a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_bifacial_radiance.py
|
kperrynrel/bifacial_radiance
|
cf5ae46b4ef93990e3e1619956a186376cb4fd8a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_bifacial_radiance.py
|
kperrynrel/bifacial_radiance
|
cf5ae46b4ef93990e3e1619956a186376cb4fd8a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 10:08:25 2018
@author: cdeline
Using pytest to create unit tests for bifacial_radiance.
to run unit tests, run pytest from the command line in the bifacial_radiance directory
to run coverage tests, run py.test --cov-report term-missing --cov=bifacial_radiance
"""
#from bifacial_radiance import RadianceObj, SceneObj, AnalysisObj
import bifacial_radiance
import numpy as np
import pytest
import os
import datetime
import pandas as pd
# try navigating to tests directory so tests run from here.
try:
os.chdir('tests')
except:
pass
TESTDIR = os.path.dirname(__file__) # this folder
# test the readepw on a dummy Boulder EPW file in the /tests/ directory
MET_FILENAME = 'USA_CO_Boulder.724699_TMY2.epw'
# also test a dummy TMY3 Denver file in /tests/
MET_FILENAME2 = "724666TYA.CSV"
# custom 2-year 15 minute datafile with a leap year
MET_FILENAME3= "Custom_WeatherFile_2years_15mins_BESTFieldData.csv"
# SolarGIS 1.5-year hourly datafile with leap year
MET_FILENAME4="SOLARGIS_Almeria_Spain_20210331.csv"
# custom 1 year TMY3 datafile with an added "Tracker Angle (degrees)" column
MET_FILENAME5="Custom_WeatherFile_TMY3format_60mins_2021_wTrackerAngles_BESTFieldData.csv"
#def test_quickExample():
# results = bifacial_radiance.main.quickExample(TESTDIR)
# assert np.mean(results.Wm2Back) == pytest.approx(195380.94444444444, rel = 0.03) # was 182 in v0.2.2
def test_RadianceObj_set1axis():
# test set1axis. requires metdata for boulder.
name = "_test_set1axis"
demo = bifacial_radiance.RadianceObj(name)
assert str(demo)[-16:-2]==name #this depends on the insertion order of the dictionary repr of demo - may not be consistent
#try:
# epwfile = demo.getEPW(lat=40.01667, lon=-105.25) # From EPW: {N 40° 1'} {W 105° 15'}
    #except: # adding an except in case the internet connection in the lab forbids the epw download.
epwfile = MET_FILENAME
metdata = demo.readWeatherFile(weatherFile=epwfile, coerce_year=2001)
trackerdict = demo.set1axis()
assert trackerdict[0]['count'] == 78 #80
assert trackerdict[45]['count'] == 822 #
def test_RadianceObj_fixed_tilt_end_to_end():
# just run the demo example. Rear irradiance fraction roughly 11.8% for 0.95m landscape panel
# takes 12 seconds
analysis = bifacial_radiance.main.quickExample(testfolder=TESTDIR)
"""
name = "_test_fixed_tilt_end_to_end"
demo = bifacial_radiance.RadianceObj(name) # Create a RadianceObj 'object'
demo.setGround(0.62) # input albedo number or material name like 'concrete'. To see options, run this without any input.
metdata = demo.readWeatherFile(weatherFile= MET_FILENAME, coerce_year=2001) # read in the EPW weather data from above
timeindex = metdata.datetime.index(pd.to_datetime('2001-06-17 12:0:0 -7'))
demo.gendaylit(timeindex=timeindex, metdata=metdata) # Noon, June 17th
# create a scene using panels in landscape at 10 deg tilt, 1.5m pitch. 0.2 m ground clearance
sceneDict = {'tilt':10,'pitch':1.5,'clearance_height':0.2, 'nMods':10, 'nRows':3}
module = demo.makeModule(name='test-module',y=0.95,x=1.59, xgap=0)
scene = demo.makeScene(module, sceneDict) #makeScene creates a .rad file with 20 modules per row, 7 rows.
octfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.
analysis = bifacial_radiance.AnalysisObj(octfile, demo.name) # return an analysis object including the scan dimensions for back irradiance
(frontscan,backscan) = analysis.moduleAnalysis(scene)
analysis.analysis(octfile, demo.name, frontscan, backscan) # compare the back vs front irradiance
"""
#assert np.round(np.mean(analysis.backRatio),decimals=2) == 0.12 # NOTE: this value is 0.11 when your module size is 1m, 0.12 when module size is 0.95m
assert np.mean(analysis.backRatio) == pytest.approx(0.12, abs = 0.01)
def test_Radiance_high_azimuth_modelchains():
# duplicate next example using modelchain
# high azimuth .ini file
HIGH_AZIMUTH_INI = os.path.join(TESTDIR, "ini_highAzimuth.ini")
(Params)= bifacial_radiance.load.readconfigurationinputfile(inifile=HIGH_AZIMUTH_INI)
Params[0]['testfolder'] = TESTDIR
# unpack the Params tuple with *Params
demo2, analysis = bifacial_radiance.modelchain.runModelChain(*Params )
#assert np.round(np.mean(analysis.backRatio),2) == 0.20 # bifi ratio was == 0.22 in v0.2.2
assert np.mean(analysis.Wm2Front) == pytest.approx(899, rel = 0.005) # was 912 in v0.2.3
assert np.mean(analysis.Wm2Back) == pytest.approx(189, rel = 0.03) # was 182 in v0.2.2
"""
def test_RadianceObj_high_azimuth_angle_end_to_end():
# modify example for high azimuth angle to test different parts of _makeSceneNxR. Rear irradiance fraction roughly 17.3% for 0.95m landscape panel
# takes 14 seconds for sensorsy = 9, 11 seconds for sensorsy = 2
name = "_test_high_azimuth_angle_end_to_end"
demo = bifacial_radiance.RadianceObj(name) # Create a RadianceObj 'object'
demo.setGround('white_EPDM') # input albedo number or material name like 'concrete'. To see options, run this without any input.
#metdata = demo.readEPW() # read in the EPW weather data from above
metdata = demo.readTMY(MET_FILENAME2) # select a TMY file using graphical picker
# Now we either choose a single time point, or use cumulativesky for the entire year.
fullYear = False
if fullYear:
demo.genCumSky(demo.epwfile) # entire year. # Don't know how to test this yet in pytest...
else:
demo.gendaylit(metdata=metdata,timeindex=4020) # Noon, June 17th
# create a scene using panels in landscape at 10 deg tilt, 1.5m pitch. 0.2 m ground clearance
sceneDict = {'tilt':10,'pitch':1.5,'height':0.2,'azimuth':30, 'nMods':10, 'nRows':3}
module = demo.makeModule(name='test-module',y=0.95,x=1.59, xgap=0)
scene = demo.makeScene('test-module',sceneDict) #makeScene creates a .rad file with 20 modules per row, 7 rows.
octfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.
analysis = bifacial_radiance.AnalysisObj(octfile, demo.name) # return an analysis object including the scan dimensions for back irradiance
(frontscan,backscan) = analysis.moduleAnalysis(scene)
analysis.analysis(octfile, demo.name, frontscan, backscan) # compare the back vs front irradiance
#assert np.round(np.mean(analysis.backRatio),2) == 0.20 # bifi ratio was == 0.22 in v0.2.2
assert np.mean(analysis.Wm2Front) == pytest.approx(899, rel = 0.005) # was 912 in v0.2.3
assert np.mean(analysis.Wm2Back) == pytest.approx(189, rel = 0.02) # was 182 in v0.2.2
"""
def test_Radiance_1axis_gendaylit_modelchains():
# duplicate next sample using modelchain
# 1-axis .ini file
filename = "ini_1axis.ini"
(Params)= bifacial_radiance.load.readconfigurationinputfile(inifile=filename)
Params[0]['testfolder'] = TESTDIR
# unpack the Params tuple with *Params
demo2, analysis = bifacial_radiance.modelchain.runModelChain(*Params)
    # V 0.2.5 fixed the gcr passed to set1axis (since gcr was not being passed to set1axis, it defaulted to 0.33).
assert(np.mean(demo2.Wm2Front) == pytest.approx(205.0, 0.01) ) # was 214 in v0.2.3 # was 205 in early v0.2.4
assert(np.mean(demo2.Wm2Back) == pytest.approx(43.0, 0.1) )
assert demo2.trackerdict['2001-01-01_1100']['scene'].text.__len__() == 132
assert demo2.trackerdict['2001-01-01_1100']['scene'].text[23:28] == " 2.0 "
"""
def test_RadianceObj_1axis_gendaylit_end_to_end():
name = "_test_1axis_gendaylit_end_to_end"
# 1-axis tracking end-to-end test with torque tube and gap generation.
# Takes 20 seconds for 2-sensor scan
gcr = 0.35 # ground cover ratio, = module_height / pitch
albedo = 0.3 # ground albedo
hub_height = 2 # tracker height at 0 tilt in meters (hub height)
demo = bifacial_radiance.RadianceObj(name) # Create a RadianceObj 'object'
demo.setGround(albedo) # input albedo number or material name like 'concrete'. To see options, run this without any input.
metdata = demo.readWeatherFile(MET_FILENAME, starttime='2001-01-01_0100', endtime = '2001-01-01_2300', coerce_year = 2001) # read in the weather data from above
#metdata = demo.readEPW(MET_FILENAME, starttime='01_01_01', endtime = '01_01_23') # read in the EPW weather data from above
# set module type to be used and passed into makeScene1axis
# test modules with gap and rear tube
module=demo.makeModule(name='test-module',x=0.984,y=1.95,torquetube = True, numpanels = 2, ygap = 0.1)
sceneDict = {'pitch': np.round(module.sceney / gcr,3),'height':hub_height, 'nMods':10, 'nRows':3}
key = '2001-01-01_1100'
# create metdata files for each condition. keys are timestamps for gendaylit workflow
trackerdict = demo.set1axis(cumulativesky=False, gcr=gcr)
# create the skyfiles needed for 1-axis tracking
demo.gendaylit1axis(metdata=metdata, enddate='01/01')
# Create the scene for the 1-axis tracking
demo.makeScene1axis({key:trackerdict[key]}, module='test-module', sceneDict=sceneDict, cumulativesky = False)
#demo.makeScene1axis({key:trackerdict[key]}, module_type,sceneDict, cumulativesky = False, nMods = 10, nRows = 3, modwanted = 7, rowwanted = 3, sensorsy = 2) #makeScene creates a .rad file with 20 modules per row, 7 rows.
demo.makeOct1axis(trackerdict,key) # just run this for one timestep: Jan 1 11am
trackerdict = demo.analysis1axis(trackerdict, singleindex=key, modWanted=7, rowWanted=3, sensorsy=2) # just run this for one timestep: Jan 1 11am
    #V 0.2.5 fixed the gcr passed to set1axis. (since gcr was not being passed to set1axis, the default gcr of 0.33 was used).
assert(np.mean(demo.Wm2Front) == pytest.approx(205.0, 0.01) ) # was 214 in v0.2.3 # was 205 in early v0.2.4
assert(np.mean(demo.Wm2Back) == pytest.approx(43.0, 0.1) )
"""
def test_1axis_gencumSky():
name = "test_1axis_gencumSky"
# Takes 20 seconds for 2-sensor scan
gcr = 0.35 # ground cover ratio, = module_height / pitch
albedo = 0.3 # ground albedo
hub_height = 2 # tracker height at 0 tilt in meters (hub height)
demo = bifacial_radiance.RadianceObj(name) # Create a RadianceObj 'object'
demo.setGround(albedo) # input albedo number or material name like 'concrete'. To see options, run this without any input.
metdata = demo.readWeatherFile(weatherFile=MET_FILENAME, starttime='01_01_01', endtime = '01_01_23', coerce_year=2001) # read in the EPW weather data from above
module=demo.makeModule(name='test-module',x=0.984,y=1.95, numpanels = 2, ygap = 0.1)
pitch= np.round(module.sceney / gcr,3)
trackerdict = demo.set1axis(cumulativesky = True, gcr=gcr)
demo.genCumSky1axis()
assert trackerdict[-45.0]['skyfile'][0:5] == 'skies' # # Having trouble with the \ or // 'skies\\1axis_-45.0.rad'
tempcsv = pd.read_csv(trackerdict[-45.0]['csvfile'], header=None, delimiter=' ')
assert tempcsv.iloc[10,0] == 185
assert tempcsv.__len__() == 8760
sceneDict = {'gcr': gcr,'hub_height':hub_height, 'clearance_height':hub_height, 'nMods':10, 'nRows':3}
trackerdict = demo.makeScene1axis(sceneDict=sceneDict, module = 'test-module')
    # Removing all of these other tests for hub_height and height since it's been identified that
    # a new module to handle hub_height and height in sceneDict needs to be implemented
    # instead of checking inside of makeScene, makeSceneNxR, and makeScene1axis
assert trackerdict[-5.0]['radfile'][0:7] == 'objects' # 'objects\\1axis-5.0_1.825_11.42_5.0_10x3_origin0,0.rad'
sceneDict = {'pitch': pitch,'clearance_height':hub_height, 'nMods':10, 'nRows':3} # testing height filter too
trackerdict = demo.makeScene1axis(sceneDict=sceneDict, module = 'test-module')
# assert trackerdict[-5.0]['radfile'] == 'objects\\1axis-5.0_1.825_11.42_5.0_10x3_origin0,0.rad'
sceneDict = {'pitch': pitch,'height':hub_height, 'nMods':10, 'nRows':3} # testing height filter too
trackerdict = demo.makeScene1axis(sceneDict=sceneDict, module = 'test-module')
# assert trackerdict[-5.0]['radfile'] == 'objects\\1axis-5.0_1.825_11.42_5.0_10x3_origin0,0.rad'
sceneDict = {'pitch': pitch,'height':hub_height, 'clearance_height':hub_height, 'nMods':10, 'nRows':3} # testing height filter too
trackerdict = demo.makeScene1axis(sceneDict=sceneDict, module = 'test-module')
# assert trackerdict[-5.0]['radfile'] == 'objects\\1axis-5.0_1.825_11.42_5.0_10x3_origin0,0.rad'
sceneDict = {'pitch': pitch,'height':hub_height, 'hub_height':hub_height, 'nMods':10, 'nRows':3} # testing height filter too
trackerdict = demo.makeScene1axis(sceneDict=sceneDict, module = 'test-module')
    demo.exportTrackerDict(trackerdict, savefile = 'results\\exportedTrackerDict')
assert trackerdict[-5.0]['radfile'][0:7] == 'objects'
#assert trackerdict[-5.0]['radfile'] == 'objects\\1axis-5.0_1.825_11.42_5.0_10x3_origin0,0.rad'
minitrackerdict = {}
minitrackerdict[list(trackerdict)[0]] = trackerdict[list(trackerdict.keys())[0]]
trackerdict = demo.makeOct1axis(trackerdict=minitrackerdict) # just run this for one timestep: Jan 1 11am
trackerdict = demo.analysis1axis(trackerdict=trackerdict, modWanted=7, rowWanted=3, sensorsy=2)
assert trackerdict[-5.0]['AnalysisObj'].x[0] == -10.76304
modscanfront = {}
modscanfront = {'xstart': -5}
trackerdict = demo.analysis1axis(trackerdict=trackerdict, modWanted=7, rowWanted=3, sensorsy=2, modscanfront=modscanfront )
assert trackerdict[-5.0]['AnalysisObj'].x[0] == -5
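# A minimal sketch of the gencumsky 1-axis chain that test_1axis_gencumSky above walks
# through call by call. Illustrative only: the module name is a placeholder, it reuses this
# suite's MET_FILENAME, and it is deliberately not named test_* so pytest will not collect it.
def _sketch_1axis_gencumsky_workflow():
    demo = bifacial_radiance.RadianceObj('sketch')
    demo.setGround(0.3)  # ground albedo
    demo.readWeatherFile(weatherFile=MET_FILENAME)
    demo.makeModule(name='sketch-module', x=1, y=2)
    trackerdict = demo.set1axis(cumulativesky=True, gcr=0.35)
    demo.genCumSky1axis()  # one cumulative sky file per tracker angle
    sceneDict = {'gcr': 0.35, 'hub_height': 2, 'nMods': 10, 'nRows': 3}
    trackerdict = demo.makeScene1axis(sceneDict=sceneDict, module='sketch-module')
    trackerdict = demo.makeOct1axis(trackerdict=trackerdict)
    return demo.analysis1axis(trackerdict=trackerdict, sensorsy=2)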
def test_SceneObj_makeSceneNxR_lowtilt():
# test _makeSceneNxR(tilt, height, pitch, azimuth = 180, nMods = 20, nRows = 7, radname = None)
# default scene with simple_panel, 10 degree tilt, 0.2 height, 1.5 row spacing, landscape
name = "_test_makeSceneNxR_lowtilt"
demo = bifacial_radiance.RadianceObj(name)
demo.makeModule(name='test-module',y=0.95,x=1.59)
#scene = bifacial_radiance.SceneObj(moduletype = name)
#scene._makeSceneNxR(tilt=10,height=0.2,pitch=1.5)
sceneDict={'tilt':10, 'clearance_height':0.2, 'pitch':1.5}
scene = demo.makeScene(module='test-module', sceneDict=sceneDict)
analysis = bifacial_radiance.AnalysisObj()
(frontscan,backscan) = analysis.moduleAnalysis(scene)
assert frontscan.pop('orient') == '-0.000 0.174 -0.985'# was 0,0,-11 in v0.2.4
assert frontscan == pytest.approx({'Nx': 1, 'Ny': 9, 'Nz': 1, 'xinc': 0, 'yinc': 0.093556736536159757,
'xstart': 4.627616431348303e-17,'ystart': -0.3778735578756446,
'zinc': 0.016496576878358378, 'zstart': 0.23717753969161476,
'sx_xinc': 0.0, 'sx_yinc':0.0, 'sx_zinc':0.0})
assert backscan.pop('orient') == '0.000 -0.174 0.985' # was 0,0,1 in v0.2.4
assert backscan == pytest.approx({'Nx': 1, 'Ny': 9, 'Nz': 1, 'xinc': 0, 'yinc': 0.093556736536159757,
'xstart': 4.580831740657635e-17, 'ystart': -0.3740532979669721, 'zinc': 0.016496576878358378,
'zstart': 0.21551176912534617,
'sx_xinc': 0.0, 'sx_yinc':0.0, 'sx_zinc':0.0})
# zstart was 0.01 and zinc was 0 in v0.2.2
#assert scene.text == '!xform -rz -90 -t -0.795 0.475 0 -rx 10 -t 0 0 0.2 -a 20 -t 1.6 0 0 -a 7 -t 0 1.5 0 -i 1 -t -15.9 -4.5 0 -rz 0 objects\\simple_panel.rad'
assert scene.text[0:116] == '!xform -rx 10 -t 0 0 0.2824828843917919 -a 20 -t 1.6 0 0 -a 7 -t 0 1.5 0 -i 1 -t -14.4 -4.5 0 -rz 0 -t 0 0 0 objects' #linux has different directory structure and will error here.
def test_SceneObj_makeSceneNxR_hightilt():
# test _makeSceneNxR(tilt, height, pitch, orientation = None, azimuth = 180, nMods = 20, nRows = 7, radname = None)
# default scene with simple_panel, 50 degree tilt, 0.2 height, 1.5 row spacing, landscape
name = "_test__makeSceneNxR_hightilt"
demo = bifacial_radiance.RadianceObj(name)
demo.makeModule(name='test-module',y=0.95,x=1.59)
#scene = bifacial_radiance.SceneObj(moduletype = name)
#scene._makeSceneNxR(tilt=65,height=0.2,pitch=1.5,azimuth=89)
sceneDict={'tilt':65, 'clearance_height':0.2, 'pitch':1.5, 'azimuth':89}
scene = demo.makeScene(module='test-module', sceneDict=sceneDict)
analysis = bifacial_radiance.AnalysisObj()
(frontscan,backscan) = analysis.moduleAnalysis(scene)
temp = frontscan.pop('orient')
'''
assert [float(x) for x in temp.split(' ')] == pytest.approx([-0.999847695156, -0.0174524064373, 0])
assert frontscan == pytest.approx({'Nx': 1, 'Ny': 1, 'Nz': 9, 'xinc': 0, 'xstart': 0, 'yinc': 0,
'ystart': 0, 'zinc': 0.086099239768481745,'zstart': 0.28609923976848173})
temp2 = backscan.pop('orient')
assert [float(x) for x in temp2.split(' ')] == pytest.approx([0.999847695156, 0.0174524064373, 0])
assert backscan == pytest.approx({'Nx': 1, 'Ny': 1, 'Nz': 9, 'xinc': 0, 'xstart': -0.94985531039857163,
'yinc': 0, 'ystart': -0.016579786115419416, 'zinc': 0.086099239768481745, 'zstart': 0.28609923976848173})
#assert scene.text == '!xform -rz -90 -t -0.795 0.475 0 -rx 65 -t 0 0 0.2 -a 20 -t 1.6 0 0 -a 7 -t 0 1.5 0 -i 1 -t -15.9 -4.5 0 -rz 91 objects\\simple_panel.rad'
assert scene.text[0:93] == '!xform -rx 65 -t 0 0 0.2 -a 20 -t 1.6 0 0 -a 7 -t 0 1.5 0 -i 1 -t -16.0 -4.5 0 -rz 91 objects'
'''
assert [float(x) for x in temp.split(' ')] == pytest.approx([-0.906, -0.016, -0.423]) #was 0,0,-1 in v0.2.4
assert frontscan == pytest.approx({'Nx': 1, 'Ny': 9, 'Nz': 1, 'xinc': -0.040142620018581696, 'xstart': 0.1796000448657153, 'yinc': -0.0007006920388131139,
'ystart': 0.0031349304442418674, 'zinc': 0.08609923976848174,'zstart': 0.2949742232650364,
'sx_xinc': 0.0, 'sx_yinc':0.0, 'sx_zinc':0.0})
temp2 = backscan.pop('orient')
assert [float(x) for x in temp2.split(' ')] == pytest.approx([0.906, 0.016, 0.423]) #was 0,0,1 in v0.2.4
assert backscan == pytest.approx({'Nx': 1, 'Ny': 9, 'Nz': 1,
'xinc': -0.040142620018581696, 'xstart': 0.15966431032235584,
'yinc': -0.0007006920388131139, 'ystart': 0.0027869509033958163,
'zinc': 0.08609923976848174, 'zstart': 0.28567662150674106,
'sx_xinc': 0.0, 'sx_yinc':0.0, 'sx_zinc':0.0})
#assert scene.text == '!xform -rz -90 -t -0.795 0.475 0 -rx 65 -t 0 0 0.2 -a 20 -t 1.6 0 0 -a 7 -t 0 1.5 0 -i 1 -t -15.9 -4.5 0 -rz 91 objects\\simple_panel.rad'
assert scene.text[0:117] == '!xform -rx 65 -t 0 0 0.6304961988424087 -a 20 -t 1.6 0 0 -a 7 -t 0 1.5 0 -i 1 -t -14.4 -4.5 0 -rz 91 -t 0 0 0 objects'
def test_AnalysisObj_linePtsMake3D():
# test linepts = linePtsMake3D(xstart,ystart,zstart,xinc,yinc,zinc,Nx,Ny,Nz,orient):
analysis = bifacial_radiance.AnalysisObj()
linepts = analysis._linePtsMake3D(0,0,0,1,1,1,0,0,0,1,2,3,'0 1 0')
assert linepts == '0 0 0 0 1 0 \r1 1 1 0 1 0 \r0 0 0 0 1 0 \r1 1 1 0 1 0 \r0 0 0 0 1 0 \r1 1 1 0 1 0 \r' # v2.5.0 new linepts because now x and z also increase not only y.
#assert linepts == '0 0 0 0 1 0 \r0 1 0 0 1 0 \r0 0 1 0 1 0 \r0 1 1 0 1 0 \r0 0 2 0 1 0 \r0 1 2 0 1 0 \r'
assert str(analysis)[12:16]=='None'
def test_gendaylit2manual():
name = "_test_gendaylit2manual"
demo = bifacial_radiance.RadianceObj(name)
demo.setGround('litesoil')
skyname = demo.gendaylit2manual(dni = 700, dhi = 100, sunalt = 67, sunaz = 180) # Invented values.
assert skyname[0:5] == 'skies' # Having trouble with the \ or // with 'skies\sky2__test_gendaylit2manual.rad'
def test_SingleModule_HPC():
# 1 module for STC conditions. DNI:900, DHI:100, sun angle: 33 elevation 0 azimuth
name = "_test_SingleModule_end_to_end"
demo = bifacial_radiance.RadianceObj(name, hpc=True) # Create a RadianceObj 'object'
demo.setGround('litesoil')
metdata = demo.readWeatherFile(weatherFile= MET_FILENAME, coerce_year=2001)
timeindex = metdata.datetime.index(pd.to_datetime('2001-06-17 13:0:0 -7'))
demo.gendaylit(timeindex=timeindex, metdata=metdata, debug=True) # 1pm, June 17th
# create a scene using panels in landscape at 10 deg tilt, 1.5m pitch. 0.2 m ground clearance
tilt=demo.getSingleTimestampTrackerAngle(metdata=metdata, timeindex=timeindex, gcr=0.33)
assert tilt == pytest.approx(-6.7, abs = 0.4)
sceneDict = {'tilt':0,'pitch':1.5,'clearance_height':1, 'nMods':1, 'nRows':1}
module=demo.makeModule(name='test-module',y=0.95,x=1.59, xgap=0)
scene = demo.makeScene(module,sceneDict)
#objname='Marker'
#text='! genbox white_EPDM mymarker 0.02 0.02 2.5 | xform -t -.01 -.01 0'
#customObject = demo.makeCustomObject(objname,text)
#demo.appendtoScene(scene.radfiles, customObject, '!xform -rz 0')
print(demo.getfilelist())
octfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.
analysis = bifacial_radiance.AnalysisObj(octfile, demo.name, hpc=True) # return an analysis object including the scan dimensions for back irradiance
(frontscan,backscan) = analysis.moduleAnalysis(scene, sensorsy=1)
analysis.analysis(octfile, demo.name, frontscan, backscan) # compare the back vs front irradiance
assert analysis.mattype[0][:12] == 'a0.0.a0.test'
assert analysis.rearMat[0][:12] == 'a0.0.a0.test'
assert analysis.x == [0]
assert analysis.y == [0]
assert np.mean(analysis.Wm2Front) == pytest.approx(1025, abs = 2)
analysis.makeImage('side.vp')
    analysis.makeFalseColor('side.vp') #TODO: this works on Silvana's computer;
    # side.vp must exist inside the views folder in the test folder... make sure this works
    # on other computers
assert np.mean(analysis.Wm2Back) == pytest.approx(166, abs = 6)
demo.makeModule() # return information text about how to makeModule
def test_left_label_metdata():
# left labeled MetObj read in with -1 hour timedelta should be identical to
# right labeled MetObj
import pvlib
import pandas as pd
(tmydata, metadata) = pvlib.iotools.epw.read_epw(MET_FILENAME, coerce_year=2001)
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
tmydata.rename(columns={'dni':'DNI',
'dhi':'DHI',
'temp_air':'DryBulb',
'wind_speed':'Wspd',
'ghi':'GHI',
'albedo':'Alb'
}, inplace=True)
metdata1 = bifacial_radiance.MetObj(tmydata, metadata, label='left')
demo = bifacial_radiance.RadianceObj('test')
metdata2 = demo.readWeatherFile(weatherFile=MET_FILENAME, label='right', coerce_year=2001)
pd.testing.assert_frame_equal(metdata1.solpos[:-1], metdata2.solpos[:-1])
assert metdata2.solpos.index[0] == pd.to_datetime('2001-01-01 07:42:00 -7')
def test_analyzeRow():
# test analyzeRow. Requires metdata for boulder.
name = "_test_analyzeRow"
demo = bifacial_radiance.RadianceObj(name)
demo.setGround(0.2)
metdata = demo.readWeatherFile(weatherFile = MET_FILENAME)
nMods = 2
nRows = 2
sceneDict = {'tilt':0, 'pitch':30, 'clearance_height':3,
'azimuth':90, 'nMods': nMods, 'nRows': nRows}
demo.setGround(0.2)
demo.gendaylit(4020)
demo.makeModule(name='test-module',y=1,x=2, xgap=0.0)
    scene = demo.makeScene('test-module',sceneDict) #makeScene creates a .rad file with 2 modules per row, 2 rows.
octfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.
analysis = bifacial_radiance.AnalysisObj(octfile, demo.name) # return an analysis object including the scan dimensions for back irradiance
rowscan = analysis.analyzeRow(octfile = octfile, scene = scene, name = name,
rowWanted = 1, sensorsy = [3,3])
assert len(rowscan) == 2
assert rowscan.keys()[2] == 'z'
assert len(rowscan[rowscan.keys()[2]][0]) == 3
# Assert z is the same for two different modules
assert rowscan[rowscan.keys()[2]][0][0] == rowscan[rowscan.keys()[2]][1][0]
# Assert Y is different for two different modules
assert rowscan[rowscan.keys()[1]][0][0]+2 == rowscan[rowscan.keys()[1]][1][0]
def test_addMaterialGroundRad():
# test addMaterialGroundRad. requires metdata for boulder.
name = "_test_addMaterial"
demo = bifacial_radiance.RadianceObj(name)
demo.setGround(0.2)
material = 'demoMat'
com = "a demonstration material"
Rrefl = 0.9
Grefl = 0.2
Brefl = 0.9
demo.addMaterial(material=material, Rrefl=Rrefl, Grefl=Grefl, Brefl=Brefl, comment=com)
demo.setGround('demoMat')
assert list(demo.ground.Rrefl) == [0.9]
Rrefl = 0.45
demo.addMaterial(material=material, Rrefl=Rrefl, Grefl=Grefl, Brefl=Brefl, comment=com, rewrite=False)
demo.setGround('demoMat')
assert list(demo.ground.Rrefl) == [0.9]
demo.addMaterial(material=material, Rrefl=Rrefl, Grefl=Grefl, Brefl=Brefl, comment=com, rewrite=True)
demo.setGround('demoMat')
assert list(demo.ground.Rrefl) == [0.45]
def test_verticalmoduleSouthFacing():
# test full routine for Vertical Modules.
name = "_test_verticalSouthFacing"
demo = bifacial_radiance.RadianceObj(name)
demo.setGround(0.2)
metdata = demo.readWeatherFile(weatherFile = MET_FILENAME)
demo.gendaylit(4020)
demo.makeModule(name='test-module',y=2,x=1)
sceneDict = {'gcr': 0.35,'hub_height':2.3, 'tilt': 90, 'azimuth': 180,
'nMods':1, 'nRows': 1}
scene = demo.makeScene('test-module',sceneDict)
octfile = demo.makeOct(demo.getfilelist())
analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy = [4,4])
results = analysis.analysis(octfile, demo.basename, frontscan, backscan)
assert analysis.mattype[0][:12] == 'a0.0.a0.test'
assert analysis.mattype[1][:12] == 'a0.0.a0.test'
assert analysis.mattype[2][:12] == 'a0.0.a0.test'
assert analysis.mattype[3][:12] == 'a0.0.a0.test'
assert analysis.rearMat[0][:12] == 'a0.0.a0.test'
assert analysis.rearMat[1][:12] == 'a0.0.a0.test'
assert analysis.rearMat[2][:12] == 'a0.0.a0.test'
assert analysis.rearMat[3][:12] == 'a0.0.a0.test'
assert analysis.x[0] == analysis.x[1]
assert analysis.x[1] == analysis.x[2]
assert round(analysis.x[0]) == 0
assert round(analysis.x[0]) == 0
assert analysis.z[3] == 2.9
def test_verticalmoduleEastFacing():
# test full routine for Vertical Modules.
name = "_test_verticalEastFacing"
demo = bifacial_radiance.RadianceObj(name)
demo.setGround(0.2)
metdata = demo.readWeatherFile(weatherFile = MET_FILENAME)
demo.gendaylit(4020)
demo.makeModule(name='test-module',y=2,x=1)
sceneDict = {'gcr': 0.35,'hub_height':2.3, 'tilt': 90, 'azimuth': 90,
'nMods':1, 'nRows': 1}
scene = demo.makeScene('test-module',sceneDict)
octfile = demo.makeOct(demo.getfilelist())
analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=4)
results = analysis.analysis(octfile, demo.basename, frontscan, backscan)
assert analysis.mattype[0][:12] == 'a0.0.a0.test'
assert analysis.mattype[1][:12] == 'a0.0.a0.test'
assert analysis.mattype[2][:12] == 'a0.0.a0.test'
assert analysis.mattype[3][:12] == 'a0.0.a0.test'
assert analysis.rearMat[0][:12] == 'a0.0.a0.test'
assert analysis.rearMat[1][:12] == 'a0.0.a0.test'
assert analysis.rearMat[2][:12] == 'a0.0.a0.test'
assert analysis.rearMat[3][:12] == 'a0.0.a0.test'
assert analysis.x[0] == analysis.x[1]
assert analysis.x[1] == analysis.x[2]
assert round(analysis.y[0]) == 0
assert round(analysis.y[0]) == 0
assert analysis.z[3] == 2.9
def test_tiltandazimuthModuleTest():
# test full routine for Vertical Modules.
name = "_test_tiltandazimuth"
demo = bifacial_radiance.RadianceObj(name)
demo.setGround(0.2)
metdata = demo.readWeatherFile(weatherFile = MET_FILENAME)
demo.gendaylit(4020)
demo.makeModule(name='test-module',y=2,x=1)
sceneDict = {'gcr': 0.35,'hub_height':2.3, 'tilt': 45, 'azimuth': 135,
'nMods':1, 'nRows': 1}
scene = demo.makeScene('test-module',sceneDict)
octfile = demo.makeOct(demo.getfilelist())
analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy = [4,4])
results = analysis.analysis(octfile, demo.basename, frontscan, backscan)
assert analysis.mattype[0] == 'a0.0.a0.test-module.6457'
assert analysis.mattype[1] == 'a0.0.a0.test-module.6457'
assert analysis.mattype[2] == 'a0.0.a0.test-module.6457'
assert analysis.mattype[3] == 'a0.0.a0.test-module.6457'
assert analysis.rearMat[0] == 'a0.0.a0.test-module.2310'
assert analysis.rearMat[1] == 'a0.0.a0.test-module.2310'
assert analysis.rearMat[2] == 'a0.0.a0.test-module.2310'
assert analysis.rearMat[3] == 'a0.0.a0.test-module.2310'
def test_readWeatherFile_extra():
# test mm_DD input, trim=true, Silvana's 15-minute multi-year file
name = "_test_readWeatherFile_extra"
demo = bifacial_radiance.RadianceObj(name)
metdata1 = demo.readWeatherFile(weatherFile = MET_FILENAME,
starttime = '06_01')
metdata2 = demo.readWeatherFile(weatherFile = MET_FILENAME,
starttime = '06_01_12')
starttime = datetime.datetime(2021,6,1,12)
metdata3 = demo.readWeatherFile(weatherFile=MET_FILENAME,
starttime=starttime,
coerce_year=2021)
starttime = pd.to_datetime('2021-06-01')
metdata4 = demo.readWeatherFile(weatherFile = MET_FILENAME,
starttime=starttime
)
assert metdata1.ghi[0] == 2
assert metdata2.ghi[0] == 610
assert metdata3.ghi[0] == 610
assert metdata4.ghi[0] == 2
def test_readWeatherFile_subhourly():
# need to test out is_leap_and_29Feb and _subhourlydatatoGencumskyformat
# and len(tmydata) != 8760 and _readSOLARGIS
name = "_test_readWeatherFile_subhourly_gencumsky"
demo = bifacial_radiance.RadianceObj(name)
metdata = demo.readWeatherFile(weatherFile=MET_FILENAME4,
source='solargis', tz_convert_val= 2 )
assert len(demo.gencumsky_metfile) == 2
gencumsky_file2 = pd.read_csv(demo.gencumsky_metfile[1], delimiter=' ',
header=None)
assert gencumsky_file2.__len__() == 8760
assert gencumsky_file2.iloc[11,0] == pytest.approx(284.0, abs=0.1)
assert metdata.elevation == 497
assert metdata.timezone == 2
def test_customTrackerAngles():
# TODO: I think with the end test on this function the
# test_RadianceObj_set1axis is no longer needed
name = "_test_customTrackerAngles"
demo = bifacial_radiance.RadianceObj(name)
metdata = demo.readWeatherFile(weatherFile=MET_FILENAME5)
assert metdata.meastracker_angle is not None
trackerdict = demo.set1axis(azimuth=90, useMeasuredTrackerAngle=True)
assert trackerdict[-20]['count'] == 3440
trackerdict = demo.set1axis(azimuth=90, useMeasuredTrackerAngle=False)
assert trackerdict[-20]['count'] == 37
| 57.305506 | 225 | 0.672721 |
deb7ee9d7067dedb7c9115f551139c837f8c59ab | 13,699 | py | Python | pyscf/prop/zfs/uhf.py | fishjojo/properties | 6eb98707e0c681f182deefbedf800911feaa7d1f | ["Apache-2.0"] | 4 | 2021-03-01T03:46:02.000Z | 2022-03-14T05:34:33.000Z | pyscf/prop/zfs/uhf.py | fishjojo/properties | 6eb98707e0c681f182deefbedf800911feaa7d1f | ["Apache-2.0"] | 1 | 2022-02-23T02:52:45.000Z | 2022-02-23T02:53:55.000Z | pyscf/prop/zfs/uhf.py | fishjojo/properties | 6eb98707e0c681f182deefbedf800911feaa7d1f | ["Apache-2.0"] | 3 | 2021-05-24T02:29:13.000Z | 2022-03-14T05:34:34.000Z |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic unrestricted Hartree-Fock zero-field splitting
(In testing)
Refs:
JCP 134, 194113 (2011); DOI:10.1063/1.3590362
PRB 60, 9566 (1999); DOI:10.1103/PhysRevB.60.9566
JCP 127, 164112 (2007); 10.1063/1.2772857
'''
from functools import reduce
import warnings
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.scf import _response_functions # noqa
from pyscf.prop.ssc.rhf import _dm1_mo2ao
from pyscf.data import nist
warnings.warn('Module ZFS is under testing')
def koseki_charge(z):
'''Koseki effective charge in SO correction
Ref:
JPC 96, 10768
JPC, 99, 12764
JPCA, 102, 10430
'''
# JPC 96, 10768
if z <= 2:
return z
elif z <= 10:
return z * (.3 + z * .05)
elif z <= 18:
return z * (1.05 - z * .0125)
elif z <= 30:
return z * ( 0.385 + 0.025 * (z - 18 - 2) ) # Jia: J. Phys. Chem. A 1998, 102, 10430
elif z < 48:
return z * ( 4.680 + 0.060 * (z - 36 - 2) )
else:
return z
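# Illustrative values, evaluated directly from the piecewise formula above (not quoted
# from the cited papers):
#   koseki_charge(8)  = 8  * (0.3   + 8*0.05)         = 5.6    (O)
#   koseki_charge(14) = 14 * (1.05  - 14*0.0125)      = 12.25  (Si)
#   koseki_charge(26) = 26 * (0.385 + 0.025*(26-20))  = 13.91  (Fe)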
def direct_spin_spin(zfsobj, mol, dm0, verbose=None):
if isinstance(dm0, numpy.ndarray) and dm0.ndim == 2: # RHF DM
return numpy.zeros((3,3))
dma, dmb = dm0
spindm = dma - dmb
effspin = mol.spin * .5
nao = dma.shape[0]
# Use QED g-factor or Dirac g-factor
#g_fac = nist.G_ELECTRON**2/4 # QED
g_fac = 1
fac = g_fac * nist.ALPHA**2 / 8 / (effspin * (effspin - .5))
hss = mol.intor('int2e_ip1ip2', comp=9).reshape(3,3,nao,nao,nao,nao)
hss = hss + hss.transpose(0,1,3,2,4,5)
hss = hss + hss.transpose(0,1,2,3,5,4)
ej = numpy.einsum('xyijkl,ji,lk', hss, spindm, spindm)
ek = numpy.einsum('xyijkl,jk,li', hss, spindm, spindm)
dss = (ej - ek) * fac
# 2-electron Fermi contact term
# FC contribution is zero in mean-field calculations because of the 16-fold
# symmetry of the 4-index tensor.
    # Generally, in a CI-like wfn, FC may have contributions to the direct
    # spin-spin coupling.
if 0:
h_fc = mol.intor('int4c1e').reshape(nao,nao,nao,nao)
ej = numpy.einsum('ijkl,ji,lk', h_fc, spindm, spindm)
ek = numpy.einsum('ijkl,jk,li', h_fc, spindm, spindm)
e_fc = (ej - ek) * fac * (4*numpy.pi/3)
dss -= e_fc * numpy.eye(3)
return dss
# Note mo1 is the imaginary part of MO^1
def make_soc(zfsobj, mol, mo_coeff, mo_occ):
h1 = make_h1_soc(zfsobj, mol, mo_coeff, mo_occ)
mo1 = solve_mo1(zfsobj, h1)
h1aa, h1ab, h1ba, h1bb = h1
mo1aa, mo1ab, mo1ba, mo1bb = mo1
effspin = mol.spin * .5
if 0: # Pederson-Khanna formula , PRB, 60, 9566
fac = -.25 / effspin**2
dso = fac * numpy.einsum('xij,yij->xy', h1aa, mo1aa)
dso += fac * numpy.einsum('xij,yij->xy', h1bb, mo1bb)
dso -= fac * numpy.einsum('xij,yij->xy', h1ab, mo1ab)
dso -= fac * numpy.einsum('xij,yij->xy', h1ba, mo1ba)
elif 0: # Neese formula, see JCP, 127, 164112
facy = -.25 / ((effspin-.5)*effspin)
facz = -.25 / effspin**2
facx = -.25 / ((effspin+.5)*(effspin+1))
dso = facz * numpy.einsum('xij,yij->xy', h1aa, mo1aa)
dso += facz * numpy.einsum('xij,yij->xy', h1bb, mo1bb)
dso -= facx * numpy.einsum('xij,yij->xy', h1ab, mo1ab)
dso -= facy * numpy.einsum('xij,yij->xy', h1ba, mo1ba)
else: # van Wullen formula, JCP, 134, 194113
# Note the sign difference to van Wullen's paper, due to the
# anti-symmetricity of the Hamiltonian
fac = -.25 / (effspin*(effspin-.5))
dso = fac * numpy.einsum('xij,yij->xy', h1aa, mo1aa)
dso += fac * numpy.einsum('xij,yij->xy', h1bb, mo1bb)
dso -= fac * numpy.einsum('xij,yij->xy', h1ab, mo1ab)
dso -= fac * numpy.einsum('xij,yij->xy', h1ba, mo1ba)
dso *= nist.ALPHA ** 4 / 4
return dso
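# Restating the van Wullen contraction implemented above in equation form (no new physics,
# just the einsum written out): with S the total spin, h^k the SOC integrals in the
# virtual-occupied spin blocks from make_h1_soc and U^l the first-order MO coefficients
# from solve_mo1,
#
#   D_SOC(k,l) = -(alpha^4/4) * 1/(4*S*(S-1/2)) *
#                sum_{a,i} [ h^k_ai(aa)*U^l_ai(aa) + h^k_ai(bb)*U^l_ai(bb)
#                          - h^k_ai(ab)*U^l_ai(ab) - h^k_ai(ba)*U^l_ai(ba) ]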
def make_h1_soc(zfsobj, mol, mo_coeff, mo_occ):
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
orboa = mo_coeff[0][:, occidxa]
orbob = mo_coeff[1][:, occidxb]
orbva = mo_coeff[0][:,~occidxa]
orbvb = mo_coeff[1][:,~occidxb]
# hso1e is the imaginary part of [i sigma dot pV x p]
# JCP, 122, 034107 Eq (2) = 1/4c^2 hso1e
if zfsobj.so_eff_charge:
hso1e = 0
for ia in range(mol.natm):
mol.set_rinv_origin(mol.atom_coord(ia))
#FIXME: when ECP is enabled
Z = koseki_charge(mol.atom_charge(ia))
hso1e += -Z * mol.intor('int1e_prinvxp', 3)
else:
hso1e = mol.intor('int1e_pnucxp', 3)
h1aa = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orboa)) for x in hso1e])
h1bb = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orbob)) for x in hso1e])
h1ab = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orbob)) for x in hso1e])
h1ba = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orboa)) for x in hso1e])
if zfsobj.sso or zfsobj.soo:
hso2e = make_soc2e(zfsobj, mo_coeff, mo_occ)
else:
hso2e = (0, 0, 0, 0)
h1aa += hso2e[0]
h1ab += hso2e[1]
h1ba += hso2e[2]
h1bb += hso2e[3]
return h1aa, h1ab, h1ba, h1bb
# Using the approximation in JCP, 122, 034107
def make_soc2e(zfsobj, mo_coeff, mo_occ):
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,~occidxa]
orbvb = mo_coeff[1][:,~occidxb]
dma = numpy.dot(orboa, orboa.T)
dmb = numpy.dot(orbob, orbob.T)
dm1 = dma + dmb
nao = dma.shape[0]
    mol = zfsobj.mol
    # hso2e is the imaginary part of SSO
    hso2e = mol.intor('int2e_p1vxp1', 3).reshape(3,nao,nao,nao,nao)
vj = numpy.einsum('yijkl,lk->yij', hso2e, dm1)
vk = numpy.einsum('yijkl,jk->yil', hso2e, dm1)
vk+= numpy.einsum('yijkl,li->ykj', hso2e, dm1)
hso2e = vj - vk * 1.5
haa = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orboa)) for x in hso2e])
hab = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orbob)) for x in hso2e])
hba = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orboa)) for x in hso2e])
hbb = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orbob)) for x in hso2e])
return haa, hab, hba, hbb
def solve_mo1(sscobj, h1):
cput1 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(sscobj.stdout, sscobj.verbose)
mo_energy = sscobj._scf.mo_energy
mo_coeff = sscobj._scf.mo_coeff
mo_occ = sscobj._scf.mo_occ
h1aa, h1ab, h1ba, h1bb = h1
nset = len(h1aa)
eai_aa = 1. / lib.direct_sum('a-i->ai', mo_energy[0][mo_occ[0]==0], mo_energy[0][mo_occ[0]>0])
eai_ab = 1. / lib.direct_sum('a-i->ai', mo_energy[0][mo_occ[0]==0], mo_energy[1][mo_occ[1]>0])
eai_ba = 1. / lib.direct_sum('a-i->ai', mo_energy[1][mo_occ[1]==0], mo_energy[0][mo_occ[0]>0])
eai_bb = 1. / lib.direct_sum('a-i->ai', mo_energy[1][mo_occ[1]==0], mo_energy[1][mo_occ[1]>0])
mo1 = (numpy.asarray(h1aa) * -eai_aa,
numpy.asarray(h1ab) * -eai_ab,
numpy.asarray(h1ba) * -eai_ba,
numpy.asarray(h1bb) * -eai_bb)
h1aa = h1ab = h1ba = h1bb = None
if not sscobj.cphf:
return mo1
orboa = mo_coeff[0][:,mo_occ[0]> 0]
orbva = mo_coeff[0][:,mo_occ[0]==0]
orbob = mo_coeff[1][:,mo_occ[1]> 0]
orbvb = mo_coeff[1][:,mo_occ[1]==0]
nocca = orboa.shape[1]
nvira = orbva.shape[1]
noccb = orbob.shape[1]
nvirb = orbvb.shape[1]
p1 = nvira * nocca
p2 = p1 + nvira * noccb
p3 = p2 + nvirb * nocca
def _split_mo1(mo1):
mo1 = mo1.reshape(nset,-1)
mo1aa = mo1[:, :p1].reshape(nset,nvira,nocca)
mo1ab = mo1[:,p1:p2].reshape(nset,nvira,noccb)
mo1ba = mo1[:,p2:p3].reshape(nset,nvirb,nocca)
mo1bb = mo1[:,p3: ].reshape(nset,nvirb,noccb)
return mo1aa, mo1ab, mo1ba, mo1bb
mo1 = numpy.hstack((mo1[0].reshape(nset,-1),
mo1[1].reshape(nset,-1),
mo1[2].reshape(nset,-1),
mo1[3].reshape(nset,-1)))
    vresp = sscobj._scf.gen_response(with_j=False, hermi=0)
mo_va_oa = numpy.asarray(numpy.hstack((orbva,orboa)), order='F')
mo_va_ob = numpy.asarray(numpy.hstack((orbva,orbob)), order='F')
mo_vb_oa = numpy.asarray(numpy.hstack((orbvb,orboa)), order='F')
mo_vb_ob = numpy.asarray(numpy.hstack((orbvb,orbob)), order='F')
def vind(mo1):
mo1aa, mo1ab, mo1ba, mo1bb = _split_mo1(mo1)
dm1aa = _dm1_mo2ao(mo1aa, orbva, orboa)
dm1ab = _dm1_mo2ao(mo1ab, orbva, orbob)
dm1ba = _dm1_mo2ao(mo1ba, orbvb, orboa)
dm1bb = _dm1_mo2ao(mo1bb, orbvb, orbob)
# imaginary Hermitian
dm1 = numpy.vstack([dm1aa-dm1aa.transpose(0,2,1),
dm1ab-dm1ba.transpose(0,2,1),
dm1ba-dm1ab.transpose(0,2,1),
dm1bb-dm1bb.transpose(0,2,1)])
v1 = vresp(dm1)
v1aa = _ao2mo.nr_e2(v1[ :nset ], mo_va_oa, (0,nvira,nvira,nvira+nocca))
v1ab = _ao2mo.nr_e2(v1[nset*1:nset*2], mo_va_ob, (0,nvira,nvira,nvira+noccb))
v1ba = _ao2mo.nr_e2(v1[nset*2:nset*3], mo_vb_oa, (0,nvirb,nvirb,nvirb+nocca))
v1bb = _ao2mo.nr_e2(v1[nset*3: ], mo_vb_ob, (0,nvirb,nvirb,nvirb+noccb))
v1aa = v1aa.reshape(nset,nvira,nocca)
v1ab = v1ab.reshape(nset,nvira,noccb)
v1ba = v1ba.reshape(nset,nvirb,nocca)
v1bb = v1bb.reshape(nset,nvirb,noccb)
v1aa *= eai_aa
v1ab *= eai_ab
v1ba *= eai_ba
v1bb *= eai_bb
v1mo = numpy.hstack((v1aa.reshape(nset,-1), v1ab.reshape(nset,-1),
v1ba.reshape(nset,-1), v1bb.reshape(nset,-1)))
return v1mo.ravel()
mo1 = lib.krylov(vind, mo1.ravel(), tol=1e-9, max_cycle=20, verbose=log)
log.timer('solving FC CPHF eqn', *cput1)
mo1 = _split_mo1(mo1)
return mo1
class ZeroFieldSplitting(lib.StreamObject):
    '''dE = S dot D dot S (zero-field splitting tensor D)'''
def __init__(self, scf_method):
self.mol = scf_method.mol
self.verbose = scf_method.mol.verbose
self.stdout = scf_method.mol.stdout
self.chkfile = scf_method.chkfile
self._scf = scf_method
self.cphf = True
self.max_cycle_cphf = 20
self.conv_tol = 1e-9
self.sso = False # Two-electron spin-same-orbit coupling
self.soo = False # Two-electron spin-other-orbit coupling
self.so_eff_charge = True
self.mo10 = None
self.mo_e10 = None
self._keys = set(self.__dict__.keys())
logger.warn(self, 'UHF-ZFS is an experimental feature. It is still in '
'testing\nFeatures and APIs may be changed in the future.')
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
log.info('******** %s for %s (In testing) ********',
self.__class__, self._scf.__class__)
log.info('with cphf = %s', self.cphf)
if self.cphf:
log.info('CPHF conv_tol = %g', self.conv_tol)
log.info('CPHF max_cycle_cphf = %d', self.max_cycle_cphf)
log.info('sso = %s (2e spin-same-orbit coupling)', self.sso)
log.info('soo = %s (2e spin-other-orbit coupling)', self.soo)
log.info('so_eff_charge = %s (1e SO effective charge)',
self.so_eff_charge)
return self
def kernel(self, mo1=None):
cput0 = (logger.process_clock(), logger.perf_counter())
self.check_sanity()
self.dump_flags()
mol = self.mol
dm0 = self._scf.make_rdm1()
zfs_ss = direct_spin_spin(self, mol, dm0)
        zfs_soc = make_soc(self, mol, self._scf.mo_coeff, self._scf.mo_occ)
zfs_tensor = zfs_ss + zfs_soc
zfs_diag = numpy.linalg.eigh(zfs_tensor)[0]
dtrace = zfs_tensor.trace()
zfs_diag -= dtrace / 3
zidx = numpy.argmax(abs(zfs_diag))
dvalue = zfs_diag[zidx] * 1.5
tmp = zfs_diag + dvalue/3
tmp[zidx] = 0
evalue = abs(tmp).max()
au2cm = nist.HARTREE2J / nist.PLANCK / nist.LIGHT_SPEED_SI * 1e-2
logger.debug(self, 'D trace = %s', dtrace)
logger.note(self, 'Axial parameter D = %s (cm^{-1})', dvalue*au2cm)
logger.note(self, 'Rhombic parameter E = %s (cm^{-1})', evalue*au2cm)
if self.verbose > logger.debug:
self.stdout.write('\nZero-field splitting tensor\n')
self.stdout.write('S_x %s\n' % zfs_tensor[0])
self.stdout.write('S_y %s\n' % zfs_tensor[1])
self.stdout.write('S_z %s\n' % zfs_tensor[2])
self.stdout.flush()
logger.timer(self, 'ZFS tensor', *cput0)
return zfs_tensor
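# Illustrative helper (hypothetical, not part of the original API): it repeats the D/E
# extraction done inline in kernel() above. After removing the trace, D is 1.5 times the
# traceless eigenvalue of largest magnitude, and the two remaining shifted components
# reduce to +/-(D_xx - D_yy)/2, whose magnitude is E.
def _zfs_d_and_e(zfs_tensor):
    w = numpy.linalg.eigh(zfs_tensor)[0] - zfs_tensor.trace() / 3
    zidx = numpy.argmax(abs(w))
    dvalue = w[zidx] * 1.5
    tmp = w + dvalue / 3
    tmp[zidx] = 0
    return dvalue, abs(tmp).max()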
ZFS = ZeroFieldSplitting
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='Ne 0 0 0',
basis='ccpvdz', spin=2, charge=-2, verbose=3)
mf = scf.UHF(mol)
mf.kernel()
zfsobj = ZFS(mf)
#zfsobj.cphf = False
#zfsobj.sso = True
#zfsobj.soo = True
#zfsobj.so_eff_charge = False
print(zfsobj.kernel())
| 37.634615 | 98 | 0.600263 |
1bc4e215c8ea9219fdb57485160c5c4dbc196e81 | 1,681 | py | Python | vsts/vsts/operations/v4_0/models/operation.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | ["MIT"] | null | null | null | vsts/vsts/operations/v4_0/models/operation.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | ["MIT"] | null | null | null | vsts/vsts/operations/v4_0/models/operation.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | ["MIT"] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .operation_reference import OperationReference
class Operation(OperationReference):
"""Operation.
:param id: The identifier for this operation.
:type id: str
:param status: The current status of the operation.
:type status: object
:param url: Url to get the full object.
:type url: str
:param _links: The links to other objects related to this object.
:type _links: :class:`ReferenceLinks <operations.v4_0.models.ReferenceLinks>`
:param result_message: The result message which is generally not set.
:type result_message: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'status': {'key': 'status', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'result_message': {'key': 'resultMessage', 'type': 'str'}
}
def __init__(self, id=None, status=None, url=None, _links=None, result_message=None):
super(Operation, self).__init__(id=id, status=status, url=url)
self._links = _links
self.result_message = result_message
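# Illustrative usage of the generated model above (values are made up for illustration;
# in practice Operation instances are deserialized from service responses):
#   op = Operation(id='a1b2c3', status='succeeded', url=None, result_message='done')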
| 43.102564 | 94 | 0.556217 |
a49026d60e1ab40e10ba64f8c3d8c2320b5aa578 | 4,501 | py | Python | tests/checkout_repo/basicgit_test.py | vincent-l-j/micropython-stubber | 7dbbc937baffedd7fdbd5ac9f94555f2c93b98d0 | ["MIT"] | null | null | null | tests/checkout_repo/basicgit_test.py | vincent-l-j/micropython-stubber | 7dbbc937baffedd7fdbd5ac9f94555f2c93b98d0 | ["MIT"] | null | null | null | tests/checkout_repo/basicgit_test.py | vincent-l-j/micropython-stubber | 7dbbc937baffedd7fdbd5ac9f94555f2c93b98d0 | ["MIT"] | null | null | null |
import sys
import os
import pytest
import subprocess
from pathlib import Path
from pytest_mock import MockerFixture
from mock import MagicMock
from typing import List
from subprocess import CompletedProcess
# make sure that the source can be found
RootPath = Path(os.getcwd())
src_path = str(RootPath / "src")
if not src_path in sys.path:
sys.path.append(src_path)
# pylint: disable=wrong-import-position,import-error
# Module Under Test
import stubber.basicgit as git
def common_tst(tag):
# print(tag)
assert isinstance(tag, str), "tag must be a string"
if tag != "latest":
assert tag.startswith("v"), "tags start with a v"
assert len(tag) >= 2, "tags are longer than 2 chars"
def test_git_clone_shallow(tmp_path):
result = git.clone("https://github.com/micropython/micropython.git", tmp_path / "micropython")
assert result == True
def test_git_clone(tmp_path):
result = git.clone("https://github.com/micropython/micropython.git", tmp_path / "micropython", shallow=False)
assert result == True
def test_git_clone_fast(mocker: MockerFixture, tmp_path):
result = CompletedProcess(
args=[
"git",
"clone",
"https://github.com/micropython/micropython.git",
"C:\\\\Users\\\\josverl\\\\AppData\\\\Local\\\\Temp\\\\pytest-of-josverl\\\\pytest-225\\\\test_git_clone0\\\\micropython",
],
returncode=0,
)
mock: MagicMock = mocker.MagicMock(return_value=result)
mocker.patch("stubber.basicgit.subprocess.run", mock)
result = git.clone("https://github.com/micropython/micropython.git", tmp_path / "micropython", shallow=False)
assert result == True
@pytest.mark.basicgit
# @pytest.mark.skip(reason="test discards uncomitted changes in top repo")
def test_get_tag_current():
if not os.path.exists(".git"):
pytest.skip("no git repo in current folder")
else:
# get tag of current repro
tag = git.get_tag()
common_tst(tag)
@pytest.mark.basicgit
def test_get_tag_latest():
repo = Path("./micropython")
if not (repo / ".git").exists():
pytest.skip("no git repo in current folder")
result = subprocess.run(["git", "switch", "main", "--force"], capture_output=True, check=True, cwd=repo.as_posix())
    assert result.returncode == 0
# get tag of current repro
tag = git.get_tag("./micropython")
assert tag == "latest"
@pytest.mark.basicgit
def test_get_failure_throws():
with pytest.raises(Exception):
git.get_tag(".not")
@pytest.mark.basicgit
@pytest.mark.skip(reason="test discards uncomitted changes in top repo")
def test_pull_main(testrepo_micropython):
"test and force update to most recent"
repo_path = testrepo_micropython
x = git.pull(repo_path, "main")
# Should succeed.
assert x
@pytest.mark.basicgit
def test_get_tag_submodule(testrepo_micropython: Path):
# get version of submodule repro
for testcase in [
testrepo_micropython.as_posix(),
str(testrepo_micropython),
".\\micropython",
]:
tag = git.get_tag(testcase)
common_tst(tag)
@pytest.mark.basicgit
@pytest.mark.skip(reason="test discards uncomitted changes in top repo")
def test_checkout_sibling(testrepo_micropython):
repo_path = testrepo_micropython
x = git.get_tag(repo_path)
assert x
for ver in ["v1.11", "v1.9.4", "v1.12"]:
git.checkout_tag(ver, repo=repo_path)
assert git.get_tag(repo_path) == ver
git.checkout_tag(x, repo=repo_path)
assert git.get_tag(repo_path) == x, "can restore to prior version"
def test_fetch():
with pytest.raises(NotADirectoryError):
git.fetch(repo=None) # type: ignore
git.fetch(repo=".")
def test_run_git_fails(mocker: MockerFixture):
"test what happens if _run_git fails"
def mock_run_git_1(cmd: List[str], repo=None, expect_stderr=False):
return None
mocker.patch("stubber.basicgit._run_git", mock_run_git_1)
# fail to fetch
r = git.fetch(repo=".")
assert r == False
# fail to get tag
r = git.get_tag()
assert r == None
# fail to checkout tag
r = git.checkout_tag("v1.10")
assert r == False
# fail to checkout commit
r = git.checkout_commit(commit_hash="123")
assert r == False
# fail to switch tag
r = git.switch_tag(tag="v1.10")
assert r == False
# fail to switch branch
r = git.switch_branch(branch="foobar")
assert r == False
| 27.278788 | 134 | 0.672962 |
3937fc704bc83ed2d9a1b6ec40beb740bb224d4c | 801 | py | Python | Tests/test_escapes.py | ZiRO-Bot/TagScript | 99a7999d61d891b0ed75ad2f2767eb49a7a9abc6 | ["CC-BY-4.0"] | 9 | 2021-03-12T19:52:15.000Z | 2022-01-23T11:50:32.000Z | Tests/test_escapes.py | ZiRO-Bot/TagScript | 99a7999d61d891b0ed75ad2f2767eb49a7a9abc6 | ["CC-BY-4.0"] | 7 | 2021-03-19T05:15:31.000Z | 2021-07-03T10:24:49.000Z | Tests/test_escapes.py | ZiRO-Bot/TagScript | 99a7999d61d891b0ed75ad2f2767eb49a7a9abc6 | ["CC-BY-4.0"] | 15 | 2021-03-08T01:17:01.000Z | 2022-03-21T09:47:42.000Z |
import TagScriptEngine as tse
blocks = [
tse.MathBlock(),
tse.RandomBlock(),
tse.RangeBlock(),
tse.AnyBlock(),
tse.IfBlock(),
tse.AllBlock(),
tse.BreakBlock(),
tse.StrfBlock(),
tse.StopBlock(),
tse.AssignmentBlock(),
tse.FiftyFiftyBlock(),
tse.ShortCutRedirectBlock("args"),
tse.LooseVariableGetterBlock(),
tse.SubstringBlock(),
tse.EmbedBlock(),
tse.ReplaceBlock(),
tse.PythonBlock(),
tse.URLEncodeBlock(),
tse.RequireBlock(),
tse.BlacklistBlock(),
tse.CommandBlock(),
tse.OverrideBlock(),
]
engine = tse.Interpreter(blocks)
msg = tse.escape_content("message provided :")
response = engine.process("{if({msg}==):provide a message|{msg}}", {"msg": tse.StringAdapter(msg)})
print(response)
print(response.body)
| 23.558824 | 99 | 0.657928 |
81a49b79c8d75b44625460e0bf51c5a5c0e543a7 | 1,066 | py | Python | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/sas_token_info.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/sas_token_info.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/sas_token_info.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SasTokenInfo(Model):
"""SAS token information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar access_token: the access token for the associated Azure Storage
Container.
:vartype access_token: str
"""
_validation = {
'access_token': {'readonly': True},
}
_attribute_map = {
'access_token': {'key': 'accessToken', 'type': 'str'},
}
def __init__(self):
super(SasTokenInfo, self).__init__()
self.access_token = None
| 28.810811 | 76 | 0.58818 |
512beab7e34165f7e5d4a64811732687b99087fc | 881 | py | Python | AI_Web/GA/migrations/0001_initial.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | ["MIT"] | 4 | 2018-12-19T14:10:56.000Z | 2021-07-12T06:05:17.000Z | AI_Web/GA/migrations/0001_initial.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | ["MIT"] | 1 | 2019-08-06T01:57:41.000Z | 2019-08-06T01:57:41.000Z | AI_Web/GA/migrations/0001_initial.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | ["MIT"] | null | null | null |
# Generated by Django 2.1 on 2018-11-09 08:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('X', models.IntegerField(default=0)),
('Y', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='GAState',
fields=[
('id', models.IntegerField(default=0, primary_key=True, serialize=False)),
('Process', models.FloatField(default=100)),
('Temperature', models.FloatField(default=0)),
('Path', models.CharField(max_length=10000)),
],
),
]
| 27.53125 | 90 | 0.531215 |
b72cdb4e64e78b2861781d88c438526200fc7489 | 3,907 | py | Python | vendors/marvell/WMSDK/mw320/sdk/tools/OpenOCD/openocd.py | ictk-solution-dev/amazon-freertos | cc76512292ddfb70bba3030dbcb740ef3c6ead8b | ["MIT"] | 2 | 2020-06-23T08:05:58.000Z | 2020-06-24T01:25:51.000Z | vendors/marvell/WMSDK/mw320/sdk/tools/OpenOCD/openocd.py | ictk-solution-dev/amazon-freertos | cc76512292ddfb70bba3030dbcb740ef3c6ead8b | ["MIT"] | 2 | 2022-03-29T05:16:50.000Z | 2022-03-29T05:16:50.000Z | vendors/marvell/WMSDK/mw320/sdk/tools/OpenOCD/openocd.py | ictk-solution-dev/amazon-freertos | cc76512292ddfb70bba3030dbcb740ef3c6ead8b | ["MIT"] | null | null | null |
#! /usr/bin/env python
# Copyright (C) 2018 Marvell International Ltd.
# All Rights Reserved.
# Load application to ram helper script
# Note: sys.stdout.flush() and sys.stderr.flush() are required for proper
# console output in eclipse
import os, sys, platform, getopt, subprocess
from sys import platform as _platform
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# We define our own which() since the `which` command may not be available on Windows
def which(program):
if _platform == "win32" or _platform == "win64" or _platform == "cygwin":
program = program + '.exe'
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return ""
def get_openocd():
global OPENOCD
if _platform == "linux" or _platform == "linux2":
if (platform.machine() == "i686"):
OPENOCD = which(SCRIPT_DIR + "/Linux/openocd")
else:
OPENOCD = which(SCRIPT_DIR + "/Linux/openocd64")
if not len(OPENOCD):
OPENOCD = which("openocd")
elif _platform == "darwin":
OPENOCD = which("openocd")
elif _platform == "win32" or _platform == "win64" or _platform == "cygwin":
OPENOCD = which(SCRIPT_DIR + "/Windows/openocd")
if not len(OPENOCD):
print "Error: Please install OpenOCD for your platform"
sys.exit()
def file_path(file_name):
if _platform == "win32" or _platform == "win64":
if len(which("cygpath")):
return subprocess.Popen(['cygpath', '-m', file_name], stdout = subprocess.PIPE).communicate()[0].strip()
else:
return file_name.replace('\\', '/')
elif _platform == "cygwin":
return subprocess.Popen(['cygpath', '-m', file_name], stdout = subprocess.PIPE).communicate()[0].strip()
else:
return file_name
def print_usage():
print ""
print "Usage:"
print sys.argv[0]
print "Optional Usage:"
print " [<-i | --interface> <JTAG hardware interface name>]"
print " Supported ones are ftdi, jlink, amontec, malink and stlink. Default is ftdi."
print " [-t | --tcp]"
print " Start in TCP/IP mode. Default is pipe mode."
print " [-h | --help]"
print " Display usage"
sys.stdout.flush()
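# Example invocations (illustrative, matching the usage text above):
#   python openocd.py                  -> pipe mode with the default ftdi interface
#   python openocd.py -i jlink --tcp   -> TCP/IP server mode using the J-Link interface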
def main():
global SCRIPT_DIR
SCRIPT_DIR = file_path(SCRIPT_DIR)
IFC_FILE = (os.getenv("DEBUG_INTERFACE", "ftdi") or "ftdi") + '.cfg'
TCPIP_MODE = 0
get_openocd()
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "i:th", ["interface=","tcp","help"])
if len(args):
print_usage()
sys.exit()
except getopt.GetoptError as e:
print e
print_usage()
sys.exit()
for opt, arg in opts:
if opt in ("-i", "--interface"):
IFC_FILE = arg + '.cfg'
elif opt in ("-t", "--tcp"):
TCPIP_MODE = 1
elif opt in ("-h", "--help"):
print_usage()
sys.exit()
print "Using OpenOCD interface file", IFC_FILE
sys.stdout.flush()
if (TCPIP_MODE == 1):
subprocess.call ([OPENOCD, '-s', SCRIPT_DIR + '/interface', '-f', IFC_FILE, '-s', SCRIPT_DIR, '-f','openocd.cfg'])
else:
subprocess.call ([OPENOCD, '-s', SCRIPT_DIR + '/interface', '-f', IFC_FILE, '-s', SCRIPT_DIR, '-f','openocd.cfg', '-c', 'gdb_port pipe; log_output openocd.log'])
sys.stderr.flush()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| 34.575221 | 170 | 0.571794 |
6049b71e95e6893b0e7e17183df1ca3bf420baf5 | 28,574 | py | Python | multiqc_sav/modules/sav.py | MultiQC/MultiQC_SAV | bd057a454108b5c6f4af2ad077e895f4476dc688 | ["MIT"] | 1 | 2021-06-01T08:47:19.000Z | 2021-06-01T08:47:19.000Z | multiqc_sav/modules/sav.py | MultiQC/MultiQC_SAV | bd057a454108b5c6f4af2ad077e895f4476dc688 | ["MIT"] | 2 | 2021-06-16T09:15:25.000Z | 2022-01-28T12:57:28.000Z | multiqc_sav/modules/sav.py | MultiQC/MultiQC_SAV | bd057a454108b5c6f4af2ad077e895f4476dc688 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import glob
import logging
import os
import re
import xml.etree.ElementTree as ET
from collections import OrderedDict
from datetime import datetime
from typing import Dict
import interop
import numpy
import pandas as pd
from interop import py_interop_plot
from multiqc import config
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import bargraph, heatmap, linegraph, scatter, table
from multiqc.utils import mqc_colour
# Initialise the main MultiQC logger
log = logging.getLogger("multiqc")
HEADERS = {
"Error Rate": {
"title": "Error Rate (%)",
"description": "The calculated error rate, as determined by a PhiX spike-in",
"min": 0,
"max": 100,
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
},
"Error Rate 35": {
"title": "Error Rate 35 Cycles (%)",
"description": "The calculated error rate for cycles 1-35.",
"min": 0,
"max": 100,
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
"hidden": True,
    },
    "Error Rate 50": {
        "title": "Error Rate 50 Cycles (%)",
"description": "The calculated error rate for cycles 1-50.",
"min": 0,
"max": 100,
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
"hidden": True,
    },
    "Error Rate 75": {
        "title": "Error Rate 75 Cycles (%)",
"description": "The calculated error rate for cycles 1-75.",
"min": 0,
"max": 100,
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
"hidden": True,
},
"Error Rate 100": {
"title": "Error Rate 100 Cycles (%)",
"description": "The calculated error rate for cycles 1-100.",
"min": 0,
"max": 100,
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
"hidden": True,
},
"First Cycle Intensity": {
"title": "Intensity Cycle 1",
"description": "The average of the A channel intensity measured at the first cycle",
},
"% Aligned": {
"title": "Aligned (%)",
"description": "Percentage of reads that aligned to the PhiX genome",
"suffix": "%",
"min": 0,
"max": 100,
"format": "{:,.0f}", # No decimal places please
},
"% >= Q30": {
"title": "% >= Q30",
"description": "Percentage of reads with quality phred score of 30 or above",
"min": 0,
"max": 100,
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
},
"% Occupancy Proxy": {
"title": "Occupancy Proxy (%)",
# "description": "",
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
},
"% Occupied": {
"title": "Occupied (%)",
"description": "The percentage of nanowells occupied by clusters, +/- 1 standard deviation.",
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
    },
    "Projected Yield G": {
        "title": "Projected Yield ({})".format(config.base_count_prefix),
        "description": "The expected number of bases sequenced ({} base pairs over all 'usable cycles')".format(
config.base_count_desc
),
"shared_key": "base_count",
"modify": lambda x: (x * 1000000000.0) * config.base_count_multiplier, # number is already in gigabases
"hidden": True,
    },
    "Yield G": {
        "title": "Yield ({})".format(config.base_count_prefix),
        "description": "The number of bases sequenced ({} base pairs over all 'usable cycles')".format(
config.base_count_desc
),
"shared_key": "base_count",
"modify": lambda x: (x * 1000000000.0) * config.base_count_multiplier, # number is already in gigabases
},
"Cluster Count": {
"title": "Clusters ({})".format(config.read_count_prefix),
"description": "Number of clusters for each tile ({})".format(config.read_count_desc),
"shared_key": "cluster_count",
"modify": lambda x: x * config.read_count_multiplier,
},
"Cluster Count Pf": {
"title": "Clusters PF ({})".format(config.read_count_prefix),
"description": "Number of clusters PF for each tile ({})".format(config.read_count_desc),
"shared_key": "cluster_count",
"modify": lambda x: x * config.read_count_multiplier,
},
"% Pf": {
"title": "Reads PF (%)",
"description": "Percentage of clusters Passing Filter",
"min": 0,
"max": 100,
"suffix": "%",
"format": "{:,.0f}", # No decimal places please
},
"Density": {
"title": "Density",
"description": "The density of clusters (in thousands per mm2) detected by image analysis, +/- 1 standard deviation.",
"hidden": True,
},
"Density Pf": {
"title": "Density PF",
"description": "The density of clusters PF (in thousands per mm2) detected by image analysis, +/- 1 standard deviation.",
"hidden": True,
},
"Phasing": {
"title": "Phasing",
"description": "The value used by RTA for the percentage of molecules in a cluster for which sequencing falls behind (phasing) the current cycle within a read.",
},
"Phasing Offset": {
"title": "Phasing Offset",
"description": "The best-fit offset of the phasing corrections, calculated from the entire read.",
"hidden": True,
},
"Phasing Slope": {
"title": "Phasing Slope",
"description": "The best-fit slope of the phasing corrections, calculated from the entire read.",
"hidden": True,
},
"Prephasing": {
"title": "Prephasing",
"description": "The value used by RTA for the percentage of molecules in a cluster for which sequencing jumps ahead (prephasing) the current cycle within a read.",
},
"Prephasing Offset": {
"title": "Prephasing Offset",
"description": "The best-fit offset of the prephasing corrections, calculated from the entire read.",
"hidden": True,
},
"Prephasing Slope": {
"title": "Prephasing Slope",
"description": "The best-fit slope of the prephasing corrections, calculated from the entire read.",
"hidden": True,
},
"Reads": {
"title": "{} Reads".format(config.read_count_prefix),
"description": "The number of reads ({})".format(config.read_count_desc),
"shared_key": "read_count",
"modify": lambda x: x * config.read_count_multiplier,
},
"Reads Pf": {
"title": "{} PF Reads".format(config.read_count_prefix),
"description": "The number of passing filter reads ({})".format(config.read_count_desc),
"shared_key": "read_count",
"modify": lambda x: x * config.read_count_multiplier,
},
"Tile Count": {"title": "Tiles", "description": "The number of tiles per lane.", "hidden": True,},
"Total Pf Reads": {
"title": "{} PF Reads".format(config.read_count_prefix),
"description": "The total number of passing filter reads for this lane ({})".format(config.read_count_desc),
"modify": lambda x: float(x) * config.read_count_multiplier,
"format": "{:,.2f}",
"shared_key": "read_count",
},
"Total Reads": {
"title": "{} Reads".format(config.read_count_prefix),
"description": "The total number of reads for this lane ({})".format(config.read_count_desc),
"modify": lambda x: float(x) * config.read_count_multiplier,
"format": "{:,.2f}",
"shared_key": "read_count",
},
"Mapped Reads Cv": {
"title": "CV",
"description": "The coefficient of variation for the number of counts across all indexes.",
"format": "{:,.2f}", # 2 decimal places please
},
"Max Mapped Reads": {
"title": "{} Max Mapped Reads".format(config.read_count_prefix),
"description": "The highest representation for any index ({})".format(config.read_count_desc),
"modify": lambda x: float(x) * config.read_count_multiplier,
"format": "{:,.2f}",
"shared_key": "read_count",
},
"Min Mapped Reads": {
"title": "{} Min Mapped Reads".format(config.read_count_prefix),
"description": "The lowest representation for any index ({})".format(config.read_count_desc),
"modify": lambda x: float(x) * config.read_count_multiplier,
"format": "{:,.2f}",
"shared_key": "read_count",
},
"Total Fraction Mapped Reads": {"hidden": True},
"Fraction Mapped": {"hidden": True},
"Index1": {"title": "Index 1 (I7)", "description": "The sequence for the first Index Read.",},
"Index2": {"title": "Index 2 (I5)", "description": "The sequence for the second Index Read",},
"Project Name": {"title": "Project Name", "description": "Sample Project Name",},
"Sample Id": {"title": "Sample ID", "description": "The Sample ID given in the SampleSheet",},
}
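# Each HEADERS entry above is a MultiQC column definition. A minimal sketch of how such a
# dict is typically handed to MultiQC's table helper (sample data below is made up purely
# for illustration; the real tables are assembled by the methods further down):
#
#   data = {'Read 1': {'% >= Q30': 93.5, '% Aligned': 1.2}}
#   headers = OrderedDict((k, HEADERS[k]) for k in ('% >= Q30', '% Aligned'))
#   plot = table.plot(data, headers, {'id': 'sav-sketch', 'title': 'SAV: sketch'})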
class SAV(BaseMultiqcModule):
"""
    Generate SAV tables and graphs, including:
- GRAPH: Intensity/Cycle/Channel
- GRAPH: Clusters/Lane
- GRAPH: Qscore Heatmap
- GRAPH: Qscore Histogram
- GRAPH: %Occ/%PF
- TABLE: Run Summary
"""
def __init__(self):
super(SAV, self).__init__(
name="Illumina SAV", anchor="sav", info=" - Sequencing Metrics from Illumina sequencers",
)
# Set variables
run_info_xml = ""
run_parameters_xml = ""
illumina_dir = ""
# Check if required files are found
for f in self.find_log_files("SAV/xml"):
if re.match(r".*[Rr]un[Ii]nfo\.xml", f["fn"]):
run_info_xml = os.path.join(f["root"], f["fn"])
if re.match(r".*[Rr]un[Pp]arameters\.xml", f["fn"]):
run_parameters_xml = os.path.join(f["root"], f["fn"])
# Assume single run for now
if (os.path.dirname(run_info_xml) == os.path.dirname(run_parameters_xml)) and len(
glob.glob(os.path.join(os.path.dirname(run_info_xml), "InterOp/*.bin"))
) > 0:
illumina_dir = os.path.dirname(run_info_xml)
else:
log.debug("Skipping MultiQC_SAV, required files were not found or not in the right structure.")
return None
self.set_run_info(run_info_xml)
self.load_metrics(illumina_dir)
self.summary_qc()
self.q_summary()
self.imaging_qc()
def load_metrics(self, illumina_dir) -> None:
log.info("Loading Run Metrics")
self.run_metrics = interop.read(run=illumina_dir, valid_to_load=interop.load_imaging_metrics(), finalize=True,)
#############
# RUN INFO
#############
def set_run_info(self, run_info_xml: str) -> None:
log.info("Loading Run Info")
run_info_xml = ET.parse(run_info_xml)
root = run_info_xml.getroot()
for run in root:
run_number = run.attrib["Number"]
flowcell = [fc.text for fc in run.iter("Flowcell")][0]
instrument_id = [fc.text for fc in run.iter("Instrument")][0]
run_date = [fc.text for fc in run.iter("Date")][0]
try:
parsed_run_date = datetime.strftime(datetime.strptime(run_date, "%y%m%d"), "%d-%m-%Y")
except ValueError:
parsed_run_date = datetime.strftime(datetime.strptime(run_date, "%m/%d/%Y %I:%M:%S %p"), "%d-%m-%Y")
read_info = ""
for read in run.iter("Read"):
key = (
f"Read {read.attrib['Number']} (I)"
if read.attrib["IsIndexedRead"] == "Y"
else f"Read {read.attrib['Number']}"
)
read_info += f"<li><b>{key}</b>: {read.attrib['NumCycles']} Cycles</li>"
self.add_section(
name="Run Info",
anchor="sav-run-info",
content=f"""
<div class="container-fluid">
<div class="row">
<div class="col-sm-4">
<h4>Instrument</h4>
<ul>
<li><b>Instrument ID:</b> {instrument_id}</li>
<li><b>Flowcell:</b> {flowcell}</li>
<li><b>Run Number:</b> {run_number}</li>
<li><b>Run Date:</b> {parsed_run_date}</li>
</ul>
</div>
<div class="col-sm-4">
<h4>Settings</h4>
<ul>
{read_info}
</ul>
</div>
</div>
</div>
""",
)
#############
# SUMMARY QC
#############
def summary_qc(self) -> None:
"""
Generate MultiQC sections related to Summary tables
:return: None
"""
log.info("Gathering Read summary metrics")
summary_read = pd.DataFrame(interop.summary(self.run_metrics, level="Read"))
summary_nonindex = pd.DataFrame(interop.summary(self.run_metrics, level="NonIndex"))
summary_total = pd.DataFrame(interop.summary(self.run_metrics, level="Total"))
self.add_section(
name="Summary Read Metrics",
anchor="sav-read-summary",
description="Summary metrics per Read",
plot=self.read_summary_table(self.parse_read_summary(summary_read, summary_nonindex, summary_total)),
)
log.info("Gathering Lane summary metrics")
summary_lane = pd.DataFrame(interop.summary(self.run_metrics, level="Lane"))
self.add_section(
name="Summary Lane Metrics",
anchor="sav-lane-summary",
description="Summary metrics per Lane per Read",
plot=self.lane_summary_table(self.parse_lane_summary(summary_lane)),
)
# - GRAPH: Clusters/Lane
log.info("Generating 'Clusters/Lane' plot")
self.add_section(
name="Clusters/Reads per Lane",
anchor="sav-clusters-lane",
description="Total Cluster/Read count per Lane",
plot=self.clusters_lane_plot(self.parse_lane_summary(summary_lane)),
)
def parse_read_summary(
self, read_metrics: pd.DataFrame, non_index_metrics: pd.DataFrame, total_metrics: pd.DataFrame
) -> Dict:
"""
Parse "Read Summary" table DataFrame
:return: Dict containing table data
"""
table_data: dict = self._parse_reads(read_metrics)
for read, data in non_index_metrics.iterrows():
table_data["Non-Indexed"] = data.to_dict()
for read, data in total_metrics.iterrows():
table_data["Total"] = data.to_dict()
return table_data
    def read_summary_table(self, data: Dict) -> table.plot:
"""
Format "Read Summary" data dict and add plot config.
:return: table object to be used in a MultiQC section
"""
headers = {header: HEADERS[header] for header in interop.summary_columns(level="Lane")}
table_config = {
"namespace": "SAV",
"id": "sav-read-metrics-summary-table",
"col1_header": "Read",
}
return table.plot(data, headers, table_config)
def parse_lane_summary(self, data: pd.DataFrame) -> Dict:
"""
Parse "Lane Summary" table DataFrame
:return: Dict containing table data
"""
lanes = data.groupby("Lane")
table_data: dict = {}
        for lane, reads in lanes:
            reads_dict = self._parse_reads(reads, key_prefix=f"Lane {lane}")
            table_data.update(reads_dict)
return table_data
def lane_summary_table(self, data: Dict) -> table.plot:
"""
Format "Lane Summary" data dict and add plot config.
:return: table object to be used in a MultiQC section
"""
headers = {header: HEADERS[header] for header in interop.summary_columns(level="Lane")}
table_config = {
"namespace": "SAV",
"id": "sav-lane-metrics-summary-table",
"col1_header": "Lane - Read",
}
return table.plot(data, headers, table_config,)
def clusters_lane_plot(self, data: Dict) -> bargraph.plot:
"""
Format "Clusters per Lane" data dict and add plot config.
:return: bar plot object to be used in a MultiQC section
"""
cluster_data = {}
read_data = {}
for key, value in data.items():
lane = int(value["Lane"])
if f"Lane {lane}" not in cluster_data:
cluster_data[f"Lane {lane}"] = {
"clusters": value["Cluster Count"],
"clusters_pf": value["Cluster Count Pf"],
"clusters_diff": value["Cluster Count"] - value["Cluster Count Pf"],
}
read_data[f"Lane {lane}"] = {
"reads": value["Reads"],
"reads_pf": value["Reads Pf"],
"reads_diff": value["Reads"] - value["Reads Pf"],
}
else:
cluster_data[f"Lane {lane}"]["clusters"] += value["Cluster Count"]
cluster_data[f"Lane {lane}"]["clusters_pf"] += value["Cluster Count Pf"]
cluster_data[f"Lane {lane}"]["clusters_diff"] += value["Cluster Count"] - value["Cluster Count Pf"]
read_data[f"Lane {lane}"]["reads"] += value["Reads"]
read_data[f"Lane {lane}"]["reads_pf"] += value["Reads Pf"]
read_data[f"Lane {lane}"]["reads_diff"] += value["Reads"] - value["Reads Pf"]
cats = [OrderedDict(), OrderedDict()]
cats[0]["clusters_pf"] = {"name": "Clusters PF"}
cats[0]["clusters_diff"] = {"name": "Clusters not PF"}
cats[1]["reads_pf"] = {"name": "Reads PF"}
cats[1]["reads_diff"] = {"name": "Reads not PF"}
plot_config = {
"id": "sav-summary-clusters-reads-lane-plot",
"title": "SAV: Cluster/Reads per Lane",
"data_labels": ["Clusters", "Reads"],
"ylab": "Lane",
}
return bargraph.plot([cluster_data, read_data], cats, plot_config)
def _parse_reads(self, reads_df: pd.DataFrame, key_prefix: str = None) -> Dict:
"""
Utility function to parse a "Reads" dataframe to dict
:return: Reads dict
"""
reads_dict = {}
reads_df = reads_df.set_index("ReadNumber")
for read, data in reads_df.iterrows():
key = f"Read {read}" + " (I)" if data["IsIndex"] == 89 else f"Read {read}"
if key_prefix:
key = f"{key_prefix} - {key}"
reads_dict[key] = data.drop("IsIndex").to_dict()
return reads_dict
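    # For example, a row with ReadNumber 2 and IsIndex == 89 (ASCII "Y") yields the key
    # "Read 2 (I)", or "Lane 1 - Read 2 (I)" when key_prefix="Lane 1" is passed (illustrative values).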
#############
# Q SUMMARY
#############
def q_summary(self) -> None:
"""
Generate MultiQC sections related to Qscore
:return: None
"""
# - GRAPH: Qscore Heatmap
log.info("Generating 'Qscore Heatmap' plot")
self.add_section(
name="Qscore Heatmap",
anchor="sav-qscore-heatmap",
description="The Qscore Heat Map provides an overview of quality scores across cycles.",
plot=self.qscore_heatmap_plot(),
)
# - GRAPH: Qscore Histogram
log.info("Generating 'Qscore Histogram' plot")
self.add_section(
name="Qscore Histogram",
anchor="sav-qscore-histogram",
description="Qscore Histogram graphs the number of bases by quality score. The quality score is cumulative for the current cycle. Only bases from reads that pass the quality filter are included.",
plot=self.qscore_histogram_plot(),
)
def qscore_heatmap_plot(self) -> heatmap.plot:
"""
Get heatmap data from run_metrics object
Note: this function has *much* room for improvement, but we need to wait for further developments in the InterOp library.
        In the meantime, this will have to do.
:return: heatmap plot object to be used in a MultiQC section
"""
options = py_interop_plot.filter_options(self.run_metrics.run_info().flowcell().naming_method())
rows = py_interop_plot.count_rows_for_heatmap(self.run_metrics)
cols = py_interop_plot.count_columns_for_heatmap(self.run_metrics)
dataBuffer = numpy.zeros((rows, cols), dtype=numpy.float32)
data = py_interop_plot.heatmap_data()
try:
py_interop_plot.plot_qscore_heatmap(self.run_metrics, options, data, dataBuffer.ravel())
except py_interop_plot.invalid_filter_option:
pass
plot_data = dataBuffer.transpose().tolist()
# cycles
x_cats = list(range(0, cols))
# qscore
y_cats = list(range(0, rows))
plot_config = {
"id": "sav-qscore-heatmap-plot",
"title": "SAV: Qscore Heatmap",
"xTitle": "Cycle",
"yTitle": "Qscore",
"square": False,
"colstops": [
[0, "#FFFFFF"],
[0.1, "#1a9850"],
[0.2, "#66bd63"],
[0.3, "#a6d96a"],
[0.4, "#d9ef8b"],
[0.5, "#ffffbf"],
[0.6, "#fee08b"],
[0.7, "#fdae61"],
[0.8, "#f46d43"],
[0.9, "#d73027"],
[1, "#a50026"],
],
}
return heatmap.plot(plot_data, x_cats, y_cats, plot_config)
def qscore_histogram_plot(self) -> linegraph.plot:
"""
Get histogram data from run_metrics object
        Note: this function has *much* room for improvement, but we need to wait for further developments in the InterOp library.
        In the meantime, this will have to do.
:return: linegraph plot object to be used in a MultiQC section
"""
bar_data = py_interop_plot.bar_plot_data()
options = py_interop_plot.filter_options(self.run_metrics.run_info().flowcell().naming_method())
py_interop_plot.plot_qscore_histogram(self.run_metrics, options, bar_data)
hist = {}
qscore = []
reads = []
binsize = []
for i in range(bar_data.size()):
qscore = [bar_data.at(i).at(j).x() for j in range(bar_data.at(i).size())]
reads = [bar_data.at(i).at(j).y() for j in range(bar_data.at(i).size())]
binsize = [bar_data.at(i).at(j).width() for j in range(bar_data.at(i).size())]
i = 0
while i < len(qscore):
j = 0
while j < binsize[i]:
hist.update({qscore[i] + j: reads[i]})
j += 1
i += 1
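        # e.g. a bar at qscore 30 with binsize 5 and 1.2e9 reads expands to
        # {30: 1.2e9, 31: 1.2e9, ..., 34: 1.2e9} (illustrative numbers only)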
plot_data = {bar_data.title(): hist}
plot_config = {
"id": "sav-qscore-histogram-plot",
"title": "SAV: Qscore Histogram",
"xlab": "Qscore",
"ylab": "Reads (Billion)",
}
return linegraph.plot(plot_data, plot_config)
#############
# IMAGING QC
#############
def imaging_qc(self) -> None:
"""
Generate MultiQC sections related to Imaging.
This includes:
- Plot: Intensity/Cycle/Channel
- Plot: %Occ/%PF
:return: None
"""
log.info("Gathering Imaging metrics")
imaging = pd.DataFrame(interop.imaging(self.run_metrics))
plot_data = self.parse_imaging_table(imaging)
# - GRAPH: Intensity/Cycle/Channel
if len(plot_data.get("intensity_cycle", [])) > 0:
log.info("Generating 'Intensity per Cycle' plot")
self.add_section(
name="Intensity per Cycle",
anchor="sav-intensity-cycle",
description="Intensity by color and cycle of the 90% percentile of the data for each tile",
plot=self.intensity_cycle_plot(plot_data.get("intensity_cycle", [])),
)
# - GRAPH: %Occ/%PF
log.info("Generating '% PF vs % Occupied' plot")
if len(plot_data.get("occ_vs_pf", [])) > 0:
self.add_section(
name="% PF vs % Occupied",
anchor="sav-imaging-pf-vs-occ",
description="% Clusters passing filter vs % Wells Occupied",
plot=self.occ_vs_pf_plot(plot_data.get("occ_vs_pf", [])),
)
def parse_imaging_table(self, data: pd.DataFrame) -> Dict:
"""
Parse full imaging table DataFrame
        :return: Dict containing data for the intensity per cycle plot (key: "intensity_cycle") and the %occ vs %pf plot (key: "occ_vs_pf")
"""
# set color scale for occ_pf
cscale = mqc_colour.mqc_colour_scale()
colors = cscale.get_colours("Dark2")
per_lane = data.groupby("Lane")
occ_pf = {}
intensity_cycle = {}
for lane, lane_data in per_lane:
lane = int(lane)
# prep intensity_cycle
CHANNEL_SETS = [{"P90/RED", "P90/GREEN"}, {"P90/Red", "P90/Green"}, {"P90/G", "P90/A", "P90/T", "P90/C"}]
channels = set()
for channel_set in CHANNEL_SETS:
if channel_set.issubset(lane_data.columns):
channels = channel_set
# prep occ_pf
if not f"Lane {lane}" in occ_pf:
occ_pf[f"Lane {lane}"] = []
prev_occ = 0
prev_pf = 0
# parse imaging table lane
for _, row in lane_data.iterrows():
                # intensity_cycle
                cycle = int(row["Cycle"])
                for channel in channels:
                    intensity = float(row[channel])
                    if channel not in intensity_cycle:
                        intensity_cycle[channel] = {}
                    if cycle not in intensity_cycle[channel]:
                        intensity_cycle[channel].update({cycle: 0})
                    intensity_cycle[channel][cycle] += intensity
# occ_pf
if {"% Occupied", "% Pass Filter"}.issubset(lane_data.columns):
occ = float(row["% Occupied"])
pf = float(row["% Pass Filter"])
if occ != prev_occ or pf != prev_pf:
prev_occ = occ
prev_pf = pf
occ_pf[f"Lane {lane}"].append({"x": occ, "y": pf, "color": colors[lane]})
else:
occ_pf = {}
return {"intensity_cycle": intensity_cycle, "occ_vs_pf": occ_pf}
def intensity_cycle_plot(self, data: Dict) -> linegraph.plot:
"""
Format Intensity per Cycle data dict and add plot config.
:return: linegraph plot object to be used in a MultiQC section
"""
# get keys from data
key_color_dict = {}
for key in data:
if re.match(r"\w+/red", key, re.IGNORECASE):
key_color_dict[key] = "red"
elif re.match(r"\w+/green", key, re.IGNORECASE):
key_color_dict[key] = "green"
elif re.match(r"\w+/G", key):
key_color_dict[key] = "blue"
elif re.match(r"\w+/A", key):
key_color_dict[key] = "black"
elif re.match(r"\w+/T", key):
key_color_dict[key] = "green"
elif re.match(r"\w+/C", key):
key_color_dict[key] = "red"
plot_config = {
"id": "sav-intensity-vs-cycle-plot",
"title": "SAV: Intensity per cycle",
"xlab": "Cycle",
"ylab": "Intensity",
"colors": key_color_dict,
}
return linegraph.plot(data, plot_config)
def occ_vs_pf_plot(self, data: Dict) -> scatter.plot:
"""
Format %Occ vs %PF data dict and add plot config.
:return: scatter plot object to be used in a MultiQC section
"""
plot_config = {
"id": "sav-pf-vs-occ-plot",
"title": "SAV: % Passing Filter vs % Occupied",
"xlab": "% Occupied",
"ylab": "% Passing Filter",
"xmin": 0,
"xmax": 100,
"ymin": 0,
"ymax": 100,
}
return scatter.plot(data, plot_config)
| 37.99734
| 208
| 0.549591
|
bc33d893f264ec12e3f913cdc3c2bf1da97767dc
| 2,309
|
py
|
Python
|
MP4_concentration.py
|
wolfiex/ropacode
|
2b5caa4ed52d985c9b6eb626f364655af20d97f3
|
[
"CC0-1.0"
] | null | null | null |
MP4_concentration.py
|
wolfiex/ropacode
|
2b5caa4ed52d985c9b6eb626f364655af20d97f3
|
[
"CC0-1.0"
] | null | null | null |
MP4_concentration.py
|
wolfiex/ropacode
|
2b5caa4ed52d985c9b6eb626f364655af20d97f3
|
[
"CC0-1.0"
] | null | null | null |
import numpy as np
import matplotlib, datetime
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
import sys,os,re,multiprocessing,netCDF4
from netCDF4 import Dataset
#netcdf file path
ncfile = sys.argv[1]
netCDF_data = Dataset(ncfile, mode='r')
# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
def colourmap(mn=0,mx=1):
#returns function that when used between range mn and mx give the rgb equivalent of the colorscheme selected
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
jet = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=mn, vmax=mx)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
return scalarMap.to_rgba
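# Example use of colourmap (illustrative values only):
#   cm = colourmap(0, 100)   # mapper over the value range 0..100
#   cm(50)                   # -> an (r, g, b, a) tuple from the 'jet' colormap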
def update_plt(num):
timestep = runs[num]
M=s_file.loc[timestep,'M']
shead = s_file.columns
    if num % 10 == 0: print(num)
cm(num)
plt.cla()
plt.title(datetime.datetime.fromtimestamp(timestep*10*60).strftime('%d %H:%M'))
#plt.xlim(0, 1)
plt.ylim(-70, 30)
plt.xlabel('index')
plt.ylabel('conc/M')
    a = s_file.loc[timestep]  # row of species concentrations for this timestep
    a = a.sort_values()  # sort the concentrations in ascending order
line = [(a.map(lambda x: -1*abs(np.log10(x/s_file.loc[timestep,'M'])+1))).plot( kind = 'area', color=cm(num), label=num) ] #,'r'
#line = [(a.map(lambda x: abs(np.log10(x/s_file.loc[timestep,'M'])))).plot( color=cm(num), label=num) ] #,'r'
return line,
# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
for group in netCDF_data.groups:
    print('\n\nReading', group)
global s_file,cm,runs
s_file = pd.DataFrame(netCDF_data.groups[group].variables['Spec'][:])
s_file.columns = str(netCDF_data.groups[group].variables['Spec'].head).split(',')
    runs = range(0, len(s_file.index), 1)  # [0] #### LAST RUN NEEDS TO BE ONE WITH GREATEST NO REACTIONS! ### 144,288,431,
cm = colourmap(min(runs),max(runs))
fig1 = plt.figure()
line_ani = animation.FuncAnimation(fig1, update_plt, len(runs), fargs=(), interval=50, blit=True)
    print('runs/4')
line_ani.save('%s.mp4'%group, writer=writer)
| 27.819277
| 133
| 0.672586
|
addd8bb8ee3b05343d23885eaff761193ac4bd4a
| 2,127
|
py
|
Python
|
1-Getting Started with Python/basics3.py
|
hchs-hdac/git-tutorial
|
c2a89b3219c89185b44c312c8db22e469ea2ffe5
|
[
"CC0-1.0"
] | 1
|
2020-04-12T20:12:54.000Z
|
2020-04-12T20:12:54.000Z
|
1-Getting Started with Python/basics3.py
|
hchs-hdac/git-tutorial
|
c2a89b3219c89185b44c312c8db22e469ea2ffe5
|
[
"CC0-1.0"
] | null | null | null |
1-Getting Started with Python/basics3.py
|
hchs-hdac/git-tutorial
|
c2a89b3219c89185b44c312c8db22e469ea2ffe5
|
[
"CC0-1.0"
] | 5
|
2020-04-09T20:16:03.000Z
|
2020-11-20T01:42:38.000Z
|
# Conditionals (if/elif/else) - will run commands only if the statement is true
x = 1
lst = ['Corn Flakes', 'Cheerios', 'Special K']
if x == 1: # the == signifies "equal to" - this is in order to distinguish from = which assigns variables
print('X is 1.') # if x is 1, Python will execute this line
else:
print('X is not 1.') # if x is anything else, Python will execute this line
# elif - shortened "else if"
# in this instance, Python will read the first if statement, then conclude that x is not in the list
# then it will move on to the elif statement and execute that command (X is 1 will print) - and won't read the else statement
# now add the integer 1 to the list and run it again - this time it executes the first command
if x in lst:
print(str(x) + ' is amazing!') # have to change the type of x to a string so that the strings can concatenate
elif x == 1:
print('X is 1.')
else:
print(x)
# Loops: allow you to execute the same commands a set number of times
# the two loops below both print the numbers 0 through 9 to the console
# while loop
y = 0
while y < 10: # while loops will loop while the condition is true, and then break once it becomes false
print(y)
y += 1 # the y += 1 is shorthand for y = y + 1
# for loop: i is an arbitrary variable - it can be named anything
for i in range(0, 10): # range from 0 up to but not including 10 - this loop will run 10 times, because there are 10 digits in the range (0, 10)
print(i)
# for loops are useful for looping through datasets (i.e. lists)
# with this loop, we are generating a copy of the previous list in a new list
new_lst = [] # empty list
for i in lst: # loops through all the items in lst
    new_lst.append(i) # appends i to the end of the new list
print(new_lst)
# List comprehensions: a handy shortcut for the task above
new_lst = [i for i in lst] # creates new lists in 1 line - you can skip the appending step here
print(new_lst)
# Another example
lst_of_nums = [3, 2, 58, 20]
new_lst_of_nums = [num + 1 for num in lst_of_nums] # you can also modify the elements in the original list
print(new_lst_of_nums)
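# List comprehensions can also take an optional condition to filter items - a small extra example
even_nums = [num for num in lst_of_nums if num % 2 == 0] # keeps only the even numbers -> [2, 58, 20]
print(even_nums)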
| 44.3125
| 144
| 0.708039
|
9f283f7aa7d975680710816dd589caa3936fc170
| 1,500
|
py
|
Python
|
nff/utils/scatter.py
|
torchmd/mdgrad
|
77bd7685b74b41acf54a9483546e1e8cb545eb01
|
[
"MIT"
] | 54
|
2021-03-10T18:35:49.000Z
|
2022-03-28T13:54:47.000Z
|
nff/utils/scatter.py
|
wwang2/torchmd
|
77bd7685b74b41acf54a9483546e1e8cb545eb01
|
[
"MIT"
] | 1
|
2021-03-17T07:01:02.000Z
|
2021-03-17T07:01:02.000Z
|
nff/utils/scatter.py
|
torchmd/mdgrad
|
77bd7685b74b41acf54a9483546e1e8cb545eb01
|
[
"MIT"
] | 5
|
2021-06-08T02:44:35.000Z
|
2021-12-17T11:50:08.000Z
|
from itertools import repeat
from torch.autograd import grad
def compute_grad(inputs, output, create_graph=True, retain_graph=True):
"""Compute gradient of the scalar output with respect to inputs.
Args:
inputs (torch.Tensor): torch tensor, requires_grad=True
output (torch.Tensor): scalar output
Returns:
torch.Tensor: gradients with respect to each input component
"""
assert inputs.requires_grad
gradspred, = grad(output, inputs, grad_outputs=output.data.new(output.shape).fill_(1),
create_graph=create_graph, retain_graph=retain_graph)
return gradspred
def gen(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
    """Broadcast ``index`` to match ``src`` and allocate the output tensor for a scatter op."""
    dim = range(src.dim())[dim] # Get real dim value.
# Automatically expand index tensor to the right dimensions.
if index.dim() == 1:
index_size = list(repeat(1, src.dim()))
index_size[dim] = src.size(dim)
index = index.view(index_size).expand_as(src)
# Generate output tensor if not given.
if out is None:
dim_size = index.max().item() + 1 if dim_size is None else dim_size
out_size = list(src.size())
out_size[dim] = dim_size
out = src.new_full(out_size, fill_value)
return src, out, index, dim
def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
    """Sum entries of ``src`` into an output tensor at the positions given by ``index`` along ``dim``."""
    src, out, index, dim = gen(src, index, dim, out, dim_size, fill_value)
return out.scatter_add_(dim, index, src)
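# Minimal usage sketch (illustrative, made-up tensor values; assumes torch is installed):
if __name__ == "__main__":
    import torch
    src = torch.tensor([1.0, 2.0, 3.0, 4.0])
    index = torch.tensor([0, 1, 0, 1])
    # entries of src sharing the same index are summed -> tensor([4., 6.])
    print(scatter_add(src, index, dim=0))
    x = torch.tensor([1.0, 2.0], requires_grad=True)
    y = (x ** 2).sum()
    # d(sum(x^2))/dx = 2x -> tensor([2., 4.])
    print(compute_grad(x, y))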
| 32.608696
| 90
| 0.664667
|
e160aaf0a90f9957dbc9c31bdca7a559b5c338f3
| 22,996
|
py
|
Python
|
Test/undoManager_test.py
|
justvanrossum/jundo
|
16e806cef60a2bd88d3916bff61d2befc3db5956
|
[
"MIT"
] | 12
|
2018-12-29T13:56:54.000Z
|
2021-09-19T13:04:03.000Z
|
Test/undoManager_test.py
|
justvanrossum/jundo
|
16e806cef60a2bd88d3916bff61d2befc3db5956
|
[
"MIT"
] | 1
|
2020-01-30T18:22:38.000Z
|
2021-09-19T18:51:53.000Z
|
Test/undoManager_test.py
|
justvanrossum/jundo
|
16e806cef60a2bd88d3916bff61d2befc3db5956
|
[
"MIT"
] | null | null | null |
from collections.abc import Mapping, Sequence, Set
import pytest
from jundo.undoManager import (
Change,
UndoManager,
UndoManagerError,
UndoProxy,
UndoProxyAttributeObject,
UndoProxyBase,
UndoProxyMapping,
UndoProxySequence,
UndoProxySet,
addItem,
addNestedItem,
getItem,
getNestedItem,
hasItem,
registerUndoProxy,
removeItem,
removeNestedItem,
replaceItem,
replaceNestedItem,
)
class _AttributeObject:
def someMethod(self, x):
return x + 2
def __repr__(self):
attrsRepr = ", ".join(f"{k}={v!r}" for k, v in self.__dict__.items())
return f"{self.__class__.__name__}({attrsRepr})"
class _error_dict(dict):
def __setitem__(self, key, value):
raise ValueError("test")
class TestUndoManager:
def test_module_docstring_example(self):
model = [1, 2, 3, {"a": 123}]
um = UndoManager()
proxy = um.setModel(model)
# Modifications must be done within a change set context:
with um.changeSet(title="replace list item"):
proxy[1] = 2000
assert model[1] == 2000
um.undo()
assert model[1] == 2
um.redo()
assert model[1] == 2000
with um.changeSet(title="replace nested dict item"):
proxy[3]["a"] = 456
assert model[3]["a"] == 456
um.undo()
assert model[3]["a"] == 123
def test_undoInfo(self):
model = [1, "a", "Q"]
um = UndoManager()
proxy = um.setModel(model)
assert um.undoInfo() is None
assert um.redoInfo() is None
with um.changeSet(title="undo action", more="any info"):
proxy[1] = 2000
assert um.undoInfo() == {'more': 'any info', 'title': 'undo action'}
assert um.redoInfo() is None
um.undo()
assert um.undoInfo() is None
assert um.redoInfo() == {'more': 'any info', 'title': 'undo action'}
um.redo()
assert um.undoInfo() == {'more': 'any info', 'title': 'undo action'}
assert um.redoInfo() is None
um.undo()
assert um.undoInfo() is None
with um.changeSet(title="another"):
proxy[1] = 2000
assert um.undoInfo() == {'title': 'another'}
assert um.redoInfo() is None
def test_modify_without_changeSet(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with pytest.raises(AssertionError):
proxy.append("foo")
assert "foo" not in model
def test_nested_changeSet(self):
model = [0, 1, 2, 3]
um = UndoManager()
_ = um.setModel(model)
with um.changeSet(title="outer"):
with pytest.raises(UndoManagerError):
with um.changeSet(title="inner"):
pass
def test_undo_within_changeSet(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="test 2"):
proxy.append(4)
assert model == [0, 1, 2, 3, 4]
with um.changeSet(title="test"):
with pytest.raises(UndoManagerError):
um.undo()
def test_empty_changeSet(self):
model = [0, 1, 2, 3]
um = UndoManager()
_ = um.setModel(model)
with um.changeSet(title="test"):
# nothing
pass
assert len(um.undoStack) == 0
def test_modify_error(self):
model = _error_dict()
um = UndoManager()
proxy = um.setModel(model)
with pytest.raises(ValueError):
with um.changeSet(title="error test"):
proxy["a"] = 12
assert um._currentChanges is None
assert "a" not in model
# assert that we *didn't* record an undo change
assert len(um.undoStack) == 0
assert len(um.undoStack) == 0
def test_rollback_after_error(self):
model = [1, 2, _error_dict()]
um = UndoManager()
proxy = um.setModel(model)
with pytest.raises(ValueError):
with um.changeSet(title="error test"):
assert len(model) == 3
proxy.append(12)
assert len(model) == 4
proxy[1] = 200
assert model[1] == 200
proxy[2]["a"] = 300
# assert that the first two changes have been rolled back
assert model == [1, 2, _error_dict()]
def test_replacing_model(self):
um = UndoManager()
_ = um.setModel({})
with pytest.raises(AssertionError):
_ = um.setModel({})
def test_changeMonitor(self):
changes = []
um = UndoManager(changeMonitor=changes.append)
proxy = um.setModel({})
with um.changeSet():
proxy["a"] = 1
proxy["b"] = 2
proxy["c"] = [3, 4, 5]
assert len(changes) == 1
changeSet = changes[-1]
expectedChanges = [
Change(op='add', path=('a',), value=1),
Change(op='add', path=('b',), value=2),
Change(op='add', path=('c',), value=[3, 4, 5]),
]
assert list(changeSet) == expectedChanges
with um.changeSet():
proxy["c"][2] = {}
proxy["c"][2]["x"] = "abc"
assert len(changes) == 2
changeSet = changes[-1]
expectedChanges = [
Change(op='replace', path=('c', 2), value={'x': 'abc'}), # odd but expected: mutable value on stack
Change(op='add', path=('c', 2, 'x'), value='abc'),
]
assert list(changeSet) == expectedChanges
um.undo()
assert len(changes) == 3
changeSet = changes[-1]
expectedChanges = [
Change(op='remove', path=('c', 2, 'x'), value=None),
Change(op='replace', path=('c', 2), value=5),
]
assert list(changeSet) == expectedChanges
um.undo()
assert len(changes) == 4
changeSet = changes[-1]
expectedChanges = [
Change(op='remove', path=('c',), value=None),
Change(op='remove', path=('b',), value=None),
Change(op='remove', path=('a',), value=None),
]
assert list(changeSet) == expectedChanges
um.redo()
assert len(changes) == 5
changeSet = changes[-1]
expectedChanges = [
Change(op='add', path=('a',), value=1),
Change(op='add', path=('b',), value=2),
Change(op='add', path=('c',), value=[3, 4, 5]),
]
assert list(changeSet) == expectedChanges
class TestProxies:
def test_collections_abc(self):
proxy = UndoProxySequence(None, None, None)
assert isinstance(proxy, Sequence)
proxy = UndoProxyMapping(None, None, None)
assert isinstance(proxy, Mapping)
proxy = UndoProxySet(None, None, None)
assert isinstance(proxy, Set)
def test_UndoProxy_dispatch(self):
assert UndoProxy(1, None) == 1
assert UndoProxy(1.2, None) == 1.2
assert UndoProxy("1.2", None) == "1.2"
assert type(UndoProxy([], None)) == UndoProxySequence
assert type(UndoProxy({}, None)) == UndoProxyMapping
assert type(UndoProxy(set(), None)) == UndoProxySet
assert type(UndoProxy(_AttributeObject(), None)) == UndoProxyAttributeObject
def test_tuple_atomic(self):
model = [1, 2, (3, 4, 5), 6, 7]
um = UndoManager()
proxy = um.setModel(model)
assert proxy[2] == (3, 4, 5)
assert type(proxy[2]) == tuple
with um.changeSet(title="replace item"):
proxy[1] = (200, 300)
assert model[1] == (200, 300)
assert type(proxy[1]) == tuple
assert type(model[1]) == tuple
assert model == [1, (200, 300), (3, 4, 5), 6, 7]
um.undo()
assert model == [1, 2, (3, 4, 5), 6, 7]
def test_callable(self):
model = _AttributeObject()
um = UndoManager()
proxy = um.setModel(model)
assert proxy.someMethod(3) == 5
def test_attr_repr(self):
model = _AttributeObject()
assert repr(model) == "_AttributeObject()"
model.a = 123
assert repr(model) == "_AttributeObject(a=123)"
model.b = "123"
assert repr(model) == "_AttributeObject(a=123, b='123')"
class TestList:
def test_list_append(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="list test"):
proxy.append("a")
proxy.append("b")
assert len(um.undoStack) == 1
assert len(um.redoStack) == 0
assert model == [0, 1, 2, 3, "a", "b"]
for a, b in zip(model, proxy):
assert a == b
assert um.undoInfo() == {"title": "list test"}
assert um.redoInfo() is None
assert len(um.undoStack) == 1
assert len(um.redoStack) == 0
um.undo()
assert um.undoInfo() is None
assert um.redoInfo() == {"title": "list test"}
assert len(um.undoStack) == 0
assert len(um.redoStack) == 1
assert model == [0, 1, 2, 3]
um.redo()
assert model == [0, 1, 2, 3, "a", "b"]
with pytest.raises(UndoManagerError):
um.redo()
def test_list_insert(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="list test"):
proxy.insert(2, "a")
proxy.insert(1, "b")
proxy.insert(5, "c")
assert model == [0, "b", 1, "a", 2, "c", 3]
um.undo()
assert model == [0, 1, 2, 3]
um.redo()
assert model == [0, "b", 1, "a", 2, "c", 3]
def test_list_insert_double(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="list test"):
proxy.insert(2, "a")
proxy.insert(2, "b")
assert model == [0, 1, "b", "a", 2, 3]
um.undo()
assert model == [0, 1, 2, 3]
um.redo()
assert model == [0, 1, "b", "a", 2, 3]
def test_list_remove(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="list test"):
del proxy[2]
assert model == [0, 1, 3]
um.undo()
assert model == [0, 1, 2, 3]
um.redo()
assert model == [0, 1, 3]
def test_list_remove_double(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="list test"):
del proxy[1]
del proxy[1]
assert model == [0, 3]
um.undo()
assert model == [0, 1, 2, 3]
um.redo()
assert model == [0, 3]
def test_list_replace(self):
model = [0, 1, 2, 3]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="list test"):
proxy[2] = "a"
assert model == [0, 1, "a", 3]
um.undo()
assert model == [0, 1, 2, 3]
um.redo()
assert model == [0, 1, "a", 3]
um.undo()
assert model == [0, 1, 2, 3]
def test_list_replace2(self):
model = ["a", "b", "c"]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="list test"):
proxy[1] = "B"
assert proxy[1] == model[1] == "B"
um.undo()
assert model == ["a", "b", "c"]
um.redo()
assert model == ["a", "B", "c"]
def test_list_index(self):
model = ["a", "b", "c"]
proxy = UndoProxy(model, None)
assert len(proxy) == 3
assert proxy[-1] == "c"
with pytest.raises(IndexError):
proxy[100]
class TestDictionary:
def test_dictionary(self):
model = {}
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="dict test"):
proxy["a"] = 12
assert model == {"a": 12}
assert model["a"] == proxy["a"]
with um.changeSet(title="dict test 2"):
proxy["a"] = 1200
assert model == {"a": 1200}
with um.changeSet(title="dict test 3"):
proxy["b"] = 24
assert model == {"a": 1200, "b": 24}
um.undo()
assert model == {"a": 1200}
um.undo()
assert model == {"a": 12}
um.undo()
assert model == {}
um.redo()
um.redo()
um.redo()
assert model == {"a": 1200, "b": 24}
um.undo()
with um.changeSet(title="dict test 4"):
proxy["c"] = 48
# assert model == {"a": 1200, "c": 24}
assert model == {"a": 1200, "c": 48}
with pytest.raises(UndoManagerError):
um.redo()
with um.changeSet(title="dict test 5"):
del proxy["a"]
assert model == {"c": 48}
um.undo()
um.undo()
assert model == {"a": 1200}
def test_dictionary_iter(self):
d = {"a": 1, "b": 2}
proxy = UndoProxy(d, None)
assert list(proxy) == ["a", "b"]
def test_dictionary_repr(self):
model = {"a": 1, "b": 2}
proxy = UndoProxy(model, None)
assert repr(proxy) == "UndoProxyMapping({'a': 1, 'b': 2}, path=())"
assert len(proxy) == 2
def test_dictionary_multiple(self):
model = {}
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="dict test"):
proxy["a"] = 12
with um.changeSet(title="dict test multiple"):
proxy["a"] = 13
proxy["a"] = 14
um.undo()
assert model["a"] == 12
um.redo()
assert model["a"] == 14
def test_dictionary_non_str_keys(self):
model = {}
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="dict test"):
proxy[1.5] = 12
proxy[10] = 120
proxy[("T", "o")] = -30
um.undo()
assert 1.5 not in model
assert 10 not in model
assert ("T", "o") not in model
um.redo()
assert model[1.5] == 12
assert model[10] == 120
assert model[("T", "o")] == -30
with um.changeSet(title="dict test"):
del proxy[1.5]
del proxy[10]
del proxy[("T", "o")]
assert 1.5 not in model
assert 10 not in model
assert ("T", "o") not in model
class TestAttributes:
def test_object(self):
model = _AttributeObject()
um = UndoManager()
proxy = um.setModel(model)
assert model.__dict__ == {}
with um.changeSet(title="object test"):
proxy.foo = 12
assert model.__dict__ == {"foo": 12}
assert proxy.foo == model.foo
um.undo()
assert model.__dict__ == {}
um.redo()
assert model.__dict__ == {"foo": 12}
with um.changeSet(title="object test 2"):
del proxy.foo
assert model.__dict__ == {}
um.undo()
assert model.__dict__ == {"foo": 12}
with pytest.raises(AssertionError):
proxy.bar = 123
with um.changeSet(title="replace test"):
proxy.foo = 123
class TestSet:
def test_set(self):
model = {1, 3, 5, 7}
um = UndoManager()
proxy = um.setModel(model)
assert 1 in proxy
assert 2 not in proxy
assert set(proxy) == model
with um.changeSet(title="add item"):
proxy.add(9)
with um.changeSet(title="remove item"):
proxy.remove(3)
assert 3 not in proxy
assert 3 not in model
assert 9 in proxy
assert 9 in model
assert set(proxy) == {1, 5, 7, 9}
um.undo()
assert set(proxy) == {1, 3, 5, 7, 9}
um.undo()
assert set(proxy) == {1, 3, 5, 7}
def test_set_insert(self):
model = [1, 2, 3, 4]
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="insert set"):
proxy[1] = {1, 2, 3}
assert model == [1, {1, 2, 3}, 3, 4]
assert proxy[1] == {1, 2, 3}
assert isinstance(proxy[1], Set)
assert not isinstance(proxy[1], set) # it's an UndoProxy after all
um.undo()
assert model == [1, 2, 3, 4]
um.redo()
assert model == [1, {1, 2, 3}, 3, 4]
def test_set_add_discard(self):
model = {1, 3, 5, 7}
um = UndoManager()
proxy = um.setModel(model)
with um.changeSet(title="add existing value"):
proxy.add(3) # already there
assert len(um.undoStack) == 0
with um.changeSet(title="remove non-existing value"):
proxy.discard(2)
assert len(um.undoStack) == 0
with um.changeSet(title="remove non-existing value"):
with pytest.raises(KeyError):
proxy.remove(2)
assert len(um.undoStack) == 0
class TestGenericFunctions:
def test_generic_hasItem(self):
d = {"a": 1}
o = _AttributeObject()
o.foo = 1
assert hasItem(d, "a")
assert hasItem(o, "foo")
assert not hasItem(d, "b")
assert not hasItem(o, "bar")
def test_generic_getItem(self):
d = {"a": 1}
lst = [1]
o = _AttributeObject()
o.foo = 1
assert getItem(d, "a") == 1
assert getItem(lst, 0) == 1
assert getItem(o, "foo") == 1
def test_generic_addItem(self):
d = {"a": 1}
lst = [1]
o = _AttributeObject()
o.foo = 1
addItem(d, "b", 2)
addItem(lst, 1, 2)
addItem(o, "bar", 2)
assert getItem(d, "b") == 2
assert getItem(lst, 1) == 2
assert getItem(o, "bar") == 2
with pytest.raises(AssertionError):
addItem(d, "b", 2)
with pytest.raises(AssertionError):
addItem(o, "bar", 2)
def test_generic_replaceItem(self):
d = {"a": 1}
lst = [1]
o = _AttributeObject()
o.foo = 1
replaceItem(d, "a", 2)
replaceItem(lst, 0, 2)
replaceItem(o, "foo", 2)
assert getItem(d, "a") == 2
assert getItem(lst, 0) == 2
assert getItem(o, "foo") == 2
with pytest.raises(AssertionError):
replaceItem(d, "b", 2)
with pytest.raises(AssertionError):
replaceItem(o, "bar", 2)
def test_generic_removeItem(self):
d = {"a": 1}
lst = [1]
o = _AttributeObject()
o.foo = 1
removeItem(d, "a")
removeItem(lst, 0)
removeItem(o, "foo")
assert not hasItem(d, "a")
assert len(lst) == 0
assert not hasItem(o, "foo")
def test_getNestedItem(self):
o = _AttributeObject()
o.foo = "foo"
d = {"a": [1, 2, 3, {"b": 4, "c": ["a", "b", "c"]}, o]}
assert getNestedItem(d, ("a", 1)) == 2
assert getNestedItem(d, ("a", 2)) == 3
assert getNestedItem(d, ("a", 3, "b")) == 4
assert getNestedItem(d, ("a", 3, "c", 1)) == "b"
assert getNestedItem(d, ("a", 4)) == o
assert getNestedItem(d, ("a", 4, "foo")) == "foo"
with pytest.raises(AttributeError):
getNestedItem(d, ("a", 2, "b"))
with pytest.raises(IndexError):
getNestedItem(d, ("a", 5))
def test_addNestedItem(self):
o = _AttributeObject()
d = {"a": [1, 2, 3, {"b": 4, "c": ["a", "b", "c", o]}]}
addNestedItem(d, ("b",), "B")
assert getNestedItem(d, ("b",)) == "B"
addNestedItem(d, ("a", 0), "C")
assert d == {"a": ["C", 1, 2, 3, {"b": 4, "c": ["a", "b", "c", o]}], "b": "B"}
addNestedItem(d, ("a", 4, "c", 4), "Q")
assert d == {"a": ["C", 1, 2, 3, {"b": 4, "c": ["a", "b", "c", o, "Q"]}], "b": "B"}
addNestedItem(d, ("a", 4, "c", 3, "foo"), "QQQ")
with pytest.raises(AssertionError):
addNestedItem(d, ("a", 4, "c", 3, "foo"), "QQQ")
assert getNestedItem(d, ("a", 4, "c", 3, "foo")) == "QQQ"
assert o.foo == "QQQ"
def test_replaceNestedItem(self):
o = _AttributeObject()
o.foo = 1
d = {"a": [1, 2, 3, {"b": 4, "c": ["a", "b", "c"], "d": o}]}
replaceNestedItem(d, ("a", 1), 222)
assert d == {"a": [1, 222, 3, {"b": 4, "c": ["a", "b", "c"], "d": o}]}
replaceNestedItem(d, ("a", 3, "d", "foo"), 222)
assert o.foo == 222
with pytest.raises(AssertionError):
replaceNestedItem(d, ("b"), 333)
with pytest.raises(AssertionError):
replaceNestedItem(d, ("a", 3, "d", "bar"), 222)
def test_removeNestedItem(self):
o = _AttributeObject()
o.foo = 1
d = {"a": [1, 2, 3, {"b": 4, "c": ["a", "b", "c"], "d": o}]}
removeNestedItem(d, ("a", 1))
assert d == {"a": [1, 3, {"b": 4, "c": ["a", "b", "c"], "d": o}]}
removeNestedItem(d, ("a", 2, "c", 1))
assert d == {"a": [1, 3, {"b": 4, "c": ["a", "c"], "d": o}]}
removeNestedItem(d, ("a", 2, "c"))
assert d == {"a": [1, 3, {"b": 4, "d": o}]}
assert hasattr(o, "foo")
removeNestedItem(d, ("a", 2, "d", "foo"))
assert not hasattr(o, "foo")
class MyCustomModel:
"""This model class demonstrates a simple case that doesn't fit the
JSON-like mold, yet can be used with the undo manager anyway via a
custom proxy.
"""
def __init__(self, position):
self.position = position
def move(self, delta):
self.position += delta
class MyCustomUndoProxy(UndoProxyBase):
@staticmethod
def modelReplaceItem(model, key, value):
# This method gets registered as a specializer for replaceItem in the
# registerUndoProxy() call.
assert key == "move"
model.move(value)
def move(self, delta):
path = self._path + ("move",) # we use the last part of the path as a method name
change = Change("replace", path, delta) # we use the "replace" operator to capture the method call
invChange = Change("replace", path, -delta)
self._undoManager.ensureCanAddChange() # this raises AssertionError if a change can't be added
self._modelObject.move(delta) # forward the call to the model object as-is
self._undoManager.addChange(change, invChange) # add the change to the undo stack
# We need to register the proxy class so it'll be picked up automatically
registerUndoProxy(MyCustomModel, MyCustomUndoProxy)
def test_custom_proxy():
model = MyCustomModel(10)
um = UndoManager()
proxy = um.setModel(model)
assert type(proxy) == MyCustomUndoProxy
with um.changeSet(title="move"):
proxy.move(10)
assert model.position == 20
um.undo()
assert model.position == 10
| 32.297753
| 112
| 0.51435
|
72c58b4e9db8405c2e1faaf42f85d952d8aa9bea
| 3,447
|
py
|
Python
|
sympy/matrices/expressions/funcmatrix.py
|
Geektrovert/sympy
|
30e7f9f4f2c26ec8f3e1577b9bf163e4851ab3d5
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/matrices/expressions/funcmatrix.py
|
Geektrovert/sympy
|
30e7f9f4f2c26ec8f3e1577b9bf163e4851ab3d5
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/matrices/expressions/funcmatrix.py
|
Geektrovert/sympy
|
30e7f9f4f2c26ec8f3e1577b9bf163e4851ab3d5
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division
from .matexpr import MatrixExpr
from sympy.core.basic import Basic
from sympy.core.function import FunctionClass, Lambda
from sympy.core.sympify import _sympify, sympify
from sympy.matrices import Matrix
from sympy.functions.elementary.complexes import re, im
class FunctionMatrix(MatrixExpr):
"""Represents a matrix using a function (``Lambda``) which gives
    outputs according to the coordinates of each matrix entry.
Parameters
==========
rows : nonnegative integer
cols : nonnegative integer
lamda : Function, Lambda or str
If it is a SymPy ``Function`` or ``Lambda`` instance,
        it should be able to accept two arguments which represent the
matrix coordinates.
If it is a pure string containing python ``lambda`` semantics,
        it is interpreted by the SymPy parser and cast into a SymPy
``Lambda`` instance.
Examples
========
Creating a ``FunctionMatrix`` from ``Lambda``:
>>> from sympy import FunctionMatrix, symbols, Lambda, MatPow, Matrix
>>> i, j = symbols('i,j')
>>> X = FunctionMatrix(3, 3, Lambda((i, j), i + j))
>>> Matrix(X)
Matrix([
[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
Creating a ``FunctionMatrix`` from a sympy function:
>>> from sympy.functions import KroneckerDelta
>>> X = FunctionMatrix(3, 3, KroneckerDelta)
>>> X.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
Creating a ``FunctionMatrix`` from a sympy undefined function:
>>> from sympy.core.function import Function
>>> f = Function('f')
>>> X = FunctionMatrix(3, 3, f)
>>> X.as_explicit()
Matrix([
[f(0, 0), f(0, 1), f(0, 2)],
[f(1, 0), f(1, 1), f(1, 2)],
[f(2, 0), f(2, 1), f(2, 2)]])
Creating a ``FunctionMatrix`` from python ``lambda``:
>>> FunctionMatrix(3, 3, 'lambda i, j: i + j')
FunctionMatrix(3, 3, Lambda((i, j), i + j))
Example of lazy evaluation of matrix product:
>>> Y = FunctionMatrix(1000, 1000, Lambda((i, j), i + j))
>>> isinstance(Y*Y, MatPow) # this is an expression object
True
>>> (Y**2)[10,10] # So this is evaluated lazily
342923500
Notes
=====
    This class provides an alternative, memory-efficient way to represent an
    extremely dense matrix whose entries can be computed from a rule, without
    storing every entry explicitly.
"""
def __new__(cls, rows, cols, lamda):
rows, cols = _sympify(rows), _sympify(cols)
cls._check_dim(rows)
cls._check_dim(cols)
lamda = sympify(lamda)
if not isinstance(lamda, (FunctionClass, Lambda)):
raise ValueError(
"{} should be compatible with SymPy function classes."
.format(lamda))
if 2 not in lamda.nargs:
raise ValueError(
'{} should be able to accept 2 arguments.'.format(lamda))
return super(FunctionMatrix, cls).__new__(cls, rows, cols, lamda)
@property
def shape(self):
return self.args[0:2]
@property
def lamda(self):
return self.args[2]
def _entry(self, i, j, **kwargs):
return self.lamda(i, j)
def _eval_trace(self):
from sympy.matrices.expressions.trace import Trace
from sympy import Sum
return Trace(self).rewrite(Sum).doit()
def as_real_imag(self):
return (re(Matrix(self)), im(Matrix(self)))
| 28.487603
| 73
| 0.610966
|
fe723f70f6b0acf4bf27dd808690e85f3db331ab
| 3,086
|
py
|
Python
|
pydsm/correlations.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
pydsm/correlations.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
pydsm/correlations.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Sergio Callegari
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
"""
Correlation utilities (:mod:`pydsm.correlations`)
=================================================
Functions to compute the auto- and cross- correlation between two vectors.
So far only unnormalized (raw) correlations are supported.
.. currentmodule:: pydsm.correlations
Functions
---------
.. autosummary::
:toctree: generated/
raw_acorr -- raw autocorrelation of a vector
    raw_xcorr -- raw crosscorrelation between two vectors
"""
from __future__ import division, print_function
import numpy as np
import sys
if sys.version_info < (3,):
range = xrange
__all__ = ["raw_acorr", "raw_xcorr"]
def raw_acorr(x, N):
"""
Computes the raw autocorrelation of a vector up to lag N.
Parameters
----------
x : array_like
1-D sequence to compute the auto-correlation upon
N : int
the maximum (positive) lag of the raw auto-correlation to return.
Returns
-------
q : ndarray
the raw (unnormalized) autocorrelation vector.
Assuming that m is the length of x
q(k) = sum_{n=k}^{m-1} x(n) x(n-k) for k = 0 ... N
Notes
-----
    The routine does not make any check on the length of x and N. It
    is the responsibility of the user to assure that len(x)>=N. In some cases
    (but only in some cases), zero padding is practiced.
"""
m = len(x)
q = np.asarray([np.dot(x[k:m], x[0:m-k]) for k in range(N+1)])
return q
def raw_xcorr(x, y, N):
"""
Computes the raw crosscorrelation between two vectors up to lag N.
Parameters
----------
x : array_like
first 1-D vector
y : array_like
second 1-D vector
N : int
the maximum (positive) lag of the raw cross-correlation to return.
Returns
-------
q : ndarray
the raw (unnormalized) crosscorrelation vector.
Assuming that mx and my are the lengths of x and y
q(k) = sum_{n=k}^{min(mx-1,my+k-1)} x(n) y(n-k) for k = 0 ... N
Notes
-----
    The routine does not make any check on the lengths of x, y and N. It
    is the responsibility of the user to assure that N<=len(y). In some cases
    (but only in some cases), zero padding is assumed.
"""
mx = len(x)
my = len(y)
q = np.asarray([np.dot(y[k:min(my, mx+k)],
x[0:min(my-k, mx)]) for k in range(N+1)])
return q
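# Minimal usage sketch (illustrative example values only):
if __name__ == "__main__":
    x = np.array([1.0, 2.0, 3.0])
    # q(0) = 1*1 + 2*2 + 3*3 = 14, q(1) = 2*1 + 3*2 = 8, q(2) = 3*1 = 3
    print(raw_acorr(x, 2))       # -> [14.  8.  3.]
    print(raw_xcorr(x, x, 2))    # cross-correlation of x with itself gives the same result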
| 27.553571
| 74
| 0.631886
|
b714daaa0b411d193c53c638ad323afc0e6b9996
| 4,205
|
py
|
Python
|
examples/pervasive/criterions/archive/multi_cross_entropy.py
|
EricLina/attn2d
|
982653439dedc7306e484e00b3dfb90e2cd7c9e1
|
[
"MIT"
] | 490
|
2018-08-18T14:25:38.000Z
|
2022-03-25T12:19:35.000Z
|
examples/pervasive/criterions/archive/multi_cross_entropy.py
|
VivianLiangB/attn2d
|
43babb8a00c723abc2e87c9ec3212de3010d9200
|
[
"MIT"
] | 17
|
2018-08-23T06:04:29.000Z
|
2022-03-26T07:35:26.000Z
|
examples/pervasive/criterions/archive/multi_cross_entropy.py
|
VivianLiangB/attn2d
|
43babb8a00c723abc2e87c9ec3212de3010d9200
|
[
"MIT"
] | 80
|
2018-08-20T09:54:49.000Z
|
2022-03-22T01:18:23.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('multi_cross_entropy')
class MultiCrossEntropyCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.eps = args.label_smoothing
self.scale = args.loss_scale
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--loss-scale', default='uniform', type=str)
# fmt: on
def get_loss_scale(self, n):
if self.scale == 'uniform':
return 1
elif self.scale == 'inverse':
return 1/n
elif self.scale == 'inverse_sqrt':
return 1/math.sqrt(n)
elif self.scale == 'prop':
return n
else:
            raise ValueError('Unknown scaling ', self.scale)
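    # Illustrative example (values not from the original code): with --loss-scale
    # inverse_sqrt and three decoder outputs, the weights used in forward() are
    # get_loss_scale(1) = 1.0, get_loss_scale(2) ~= 0.707, get_loss_scale(3) ~= 0.577,
    # and the weighted sum of losses is divided by their total to form an average.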
def forward(self, model, sample, step=-1, epoche=-1, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
decoder_outputs = model(**sample['net_input'])
total_nll_loss = decoder_outputs[0].new_zeros(1).float()
total_loss = decoder_outputs[0].new_zeros(1).float()
total_scales = 0
for i, output in enumerate(decoder_outputs):
loss, nll_loss = self.compute_loss(output, sample, reduce=reduce)
scale = self.get_loss_scale(i+1)
total_scales += scale
total_loss = total_loss + scale * loss
total_nll_loss = total_nll_loss + scale * nll_loss
# Average:
total_loss = total_loss / total_scales
total_nll_loss = total_nll_loss / total_scales
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(total_loss.data) if reduce else total_loss.data,
'nll_loss': utils.item(total_nll_loss.data) if reduce else total_nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
        return total_loss, sample_size, logging_output
def compute_loss(self, net_output, sample, reduce=True):
lprobs = utils.log_softmax(net_output, dim=-1)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = sample['target'].view(-1, 1)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = self.eps / lprobs.size(-1)
loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
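        # e.g. with --label-smoothing 0.1 and a 10,000-entry vocabulary, eps_i = 1e-5,
        # so loss = 0.9 * nll_loss + 1e-5 * smooth_loss (illustrative numbers only)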
return loss, nll_loss
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
return {
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
| 40.432692
| 103
| 0.629251
|
b932c53435d89a32f1906047cd46ae1cbbe03e4e
| 47
|
py
|
Python
|
tests/__init__.py
|
Trim21/beancount-fetch
|
20ba65cd06dfae5de7c0e831fb88c2893b84b657
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
Trim21/beancount-fetch
|
20ba65cd06dfae5de7c0e831fb88c2893b84b657
|
[
"MIT"
] | 2
|
2020-09-24T23:18:32.000Z
|
2020-09-25T09:10:19.000Z
|
tests/__init__.py
|
Trim21/beancount-fetch
|
20ba65cd06dfae5de7c0e831fb88c2893b84b657
|
[
"MIT"
] | null | null | null |
"""Unit test package for beancount_fetcher."""
| 23.5
| 46
| 0.744681
|
e1de8bb2f68914de9dede24c9fd477fd4808da36
| 723
|
py
|
Python
|
tests/test_siaf_interface.py
|
mtakahiro/mirage
|
782bb01b46cba078f0f0afb9192e8439cf4ae5fc
|
[
"BSD-3-Clause"
] | 37
|
2018-10-03T13:42:11.000Z
|
2022-03-10T14:44:47.000Z
|
tests/test_siaf_interface.py
|
mtakahiro/mirage
|
782bb01b46cba078f0f0afb9192e8439cf4ae5fc
|
[
"BSD-3-Clause"
] | 531
|
2018-06-26T18:18:24.000Z
|
2022-03-31T13:09:36.000Z
|
tests/test_siaf_interface.py
|
mtakahiro/mirage
|
782bb01b46cba078f0f0afb9192e8439cf4ae5fc
|
[
"BSD-3-Clause"
] | 39
|
2018-06-26T19:13:08.000Z
|
2022-03-02T02:22:26.000Z
|
"""Test the functions provided by siaf_interface.
Authors
-------
- Johannes Sahlmann
Use
---
>>> pytest -s test_siaf_interface.py
"""
from pysiaf import iando
from mirage.utils import siaf_interface
def test_sci_subarray_corners():
"""Unit test for siaf_interface.sci_subarray_corners."""
instrument = 'NIRCam'
siaf_detector_layout = iando.read.read_siaf_detector_layout()
master_aperture_names = siaf_detector_layout['AperName'].data
for aperture_name in master_aperture_names:
if 'NRC' not in aperture_name:
continue
x_sci, y_sci = siaf_interface.sci_subarray_corners(instrument, aperture_name)
assert len(x_sci) == 2
assert len(y_sci) == 2
| 24.931034
| 85
| 0.709544
|
c6a60725b02d0b878a5be3af785d5399a2bff48f
| 91
|
py
|
Python
|
DQMOffline/Muon/python/muonPFAnalyzer_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQMOffline/Muon/python/muonPFAnalyzer_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQMOffline/Muon/python/muonPFAnalyzer_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.Muon.muonPFAnalyzer_cfi import *
| 22.75
| 48
| 0.846154
|
84ff5e3e995caff4edbbd811e9cfeaa2824e732a
| 4,809
|
py
|
Python
|
venv/lib/python3.6/site-packages/nlp/datasets/xquad/d77ebc147170f49904a4c13b55ba63a39324810caad218d09a857227b8956df5/xquad.py
|
MachineLearningBCAM/minimax-risk-classifier
|
82586c632268c103de269bcbffa5f7849b174a29
|
[
"MIT"
] | 2
|
2021-09-28T01:36:21.000Z
|
2021-12-22T08:24:17.000Z
|
venv/lib/python3.6/site-packages/nlp/datasets/xquad/d77ebc147170f49904a4c13b55ba63a39324810caad218d09a857227b8956df5/xquad.py
|
MachineLearningBCAM/minimax-risk-classifier
|
82586c632268c103de269bcbffa5f7849b174a29
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/nlp/datasets/xquad/d77ebc147170f49904a4c13b55ba63a39324810caad218d09a857227b8956df5/xquad.py
|
MachineLearningBCAM/minimax-risk-classifier
|
82586c632268c103de269bcbffa5f7849b174a29
|
[
"MIT"
] | 1
|
2020-12-08T10:36:30.000Z
|
2020-12-08T10:36:30.000Z
|
"""TODO(xquad): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import nlp
_CITATION = """\
@article{Artetxe:etal:2019,
author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},
title = {On the cross-lingual transferability of monolingual representations},
journal = {CoRR},
volume = {abs/1910.11856},
year = {2019},
archivePrefix = {arXiv},
eprint = {1910.11856}
}
"""
_DESCRIPTION = """\
XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question answering
performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from the development set
of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into ten languages: Spanish, German,
Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently, the dataset is entirely parallel
across 11 languages.
"""
_URL = 'https://github.com/deepmind/xquad/raw/master/'
_LANG = ['ar', 'de', 'zh', 'vi', 'en', 'es', 'hi', 'el', 'th', 'tr', 'ru']
class XquadConfig(nlp.BuilderConfig):
""" BuilderConfig for Xquad"""
def __init__(self,
lang,
**kwargs
):
"""
Args:
lang: string, language for the input text
**kwargs: keyword arguments forwarded to super.
"""
super(XquadConfig, self).__init__(
version=nlp.Version(
"1.0.0",
"New split API (https://tensorflow.org/datasets/splits)"),
**kwargs)
self.lang = lang
class Xquad(nlp.GeneratorBasedBuilder):
"""TODO(xquad): Short description of my dataset."""
# TODO(xquad): Set up version.
VERSION = nlp.Version('1.0.0')
BUILDER_CONFIGS = [
        XquadConfig(
            name='xquad.{}'.format(lang),
            description=_DESCRIPTION,
            lang=lang,
        ) for lang in _LANG
]
def _info(self):
# TODO(xquad): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features({
"id":
nlp.Value('string'),
"context":
nlp.Value('string'),
"question":
nlp.Value('string'),
"answers":
nlp.features.Sequence({
"text": nlp.Value('string'),
"answer_start": nlp.Value('int32'),
}),
# These are the features of your dataset like images, labels ...
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage='https://github.com/deepmind/xquad',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(xquad): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {
lang: _URL + 'xquad.{}.json'.format(lang) for lang in _LANG
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
'filepath': downloaded_files[self.config.lang]
},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(xquad): Yields (key, example) tuples from the dataset
with open(filepath) as f:
xquad = json.load(f)
for article in xquad["data"]:
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"context": context,
"question": question,
"id": id_,
"answers": {
"answer_start": answer_starts,
"text": answers,
},
}
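# --- Editor's usage sketch (not part of the original file) ---
# Hedged example: the config names follow the 'xquad.<lang>' pattern defined in
# BUILDER_CONFIGS above, and the builder only produces a VALIDATION split. The
# call assumes the legacy `nlp` library is installed and the GitHub download
# URL is reachable.
if __name__ == "__main__":
    dataset = nlp.load_dataset("xquad", "xquad.en")["validation"]
    first = dataset[0]
    print(first["question"], first["answers"]["text"])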
| 33.165517
| 121
| 0.587024
|
0a433b922ebb456f6df50b8b0ef0fc2c879c9c70
| 2,708
|
py
|
Python
|
Cogs/Game.py
|
chaejm55/discord_velog_bot
|
38574e2564f5eda2114c5a257e7ed8e1c5b3d29a
|
[
"MIT"
] | 1
|
2021-11-26T00:21:21.000Z
|
2021-11-26T00:21:21.000Z
|
Cogs/Game.py
|
chaejm55/discord_velog_bot
|
38574e2564f5eda2114c5a257e7ed8e1c5b3d29a
|
[
"MIT"
] | null | null | null |
Cogs/Game.py
|
chaejm55/discord_velog_bot
|
38574e2564f5eda2114c5a257e7ed8e1c5b3d29a
|
[
"MIT"
] | 1
|
2021-11-26T00:21:21.000Z
|
2021-11-26T00:21:21.000Z
|
import random
import os
from discord.ext import commands
from discord.ext.commands import MissingRequiredArgument
async def make_dir(directory_name):
try:
if not os.path.exists(directory_name):
os.makedirs(directory_name)
except OSError:
print('Error: makedirs()')
async def add_result(directory_name, user_name, result):
file_path = directory_name + '/' + user_name + '.txt'
if os.path.exists(file_path):
with open(file_path, 'a', encoding='UTF-8') as f:
f.write(result)
else:
with open(file_path, 'w', encoding='UTF-8') as f:
f.write(result)
class Game(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def dice(self, ctx):
randnum = random.randint(1, 6)
await ctx.send(f'주사위 결과는 {randnum} 입니다.')
@commands.command()
async def mining(self, ctx):
minerals = ['다이아몬드', '루비', '에메랄드', '자수정', '철', '석탄']
weights = [1, 3, 6, 15, 25, 50]
results = random.choices(minerals, weights=weights, k=5)
await ctx.send(', '.join(results) + ' 광물들을 획득하였습니다.')
@commands.command()
async def game(self, ctx, user: str):
rps_table = ['가위', '바위', '보']
bot = random.choice(rps_table)
result = rps_table.index(user) - rps_table.index(bot)
if result == 0:
result_text = f'{user} vs {bot} 비김'
await ctx.send(f'{user} vs {bot} 비겼습니다.')
elif result == 1 or result == -2:
result_text = f'{user} vs {bot} 승리!'
await ctx.send(f'{user} vs {bot} 유저가 이겼습니다.')
else:
result_text = f'{user} vs {bot} 패배...'
await ctx.send(f'{user} vs {bot} 봇이 이겼습니다.')
directory_name = "game_result"
await make_dir(directory_name)
await add_result(directory_name, str(ctx.author), result_text + '\n')
    @game.error  # Use a decorator of the form @<command>.error.
    async def game_error(self, ctx, error):  # ctx and error are required parameters.
        if isinstance(error, MissingRequiredArgument):  # Use isinstance to decide what to do for each error type.
await ctx.send("가위/바위/보 중 낼 것을 입력해주세요.")
@commands.command(name="전적")
async def game_board(self, ctx):
user_name = str(ctx.author)
file_path = "game_result/" + user_name + ".txt"
if os.path.exists(file_path):
with open(file_path, "r", encoding="UTF-8") as f:
result = f.read()
await ctx.send(f'{ctx.author}님의 가위바위보 게임 전적입니다.\n==============================\n' + result)
else:
await ctx.send(f'{ctx.author}님의 가위바위보 전적이 존재하지 않습니다.')
def setup(bot):
bot.add_cog(Game(bot))
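# --- Editor's usage sketch (not part of the original file) ---
# Hedged example of wiring this cog into a bot with discord.py 1.x, where
# load_extension() is synchronous and calls the setup() function above; the
# command prefix and token are placeholders.
if __name__ == "__main__":
    bot = commands.Bot(command_prefix="!")
    bot.load_extension("Cogs.Game")
    bot.run("YOUR_BOT_TOKEN")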
| 33.85
| 104
| 0.580871
|
9d623e84c4d9d1f6d0468a3bd49b37220365a694
| 3,150
|
py
|
Python
|
calm/dsl/cli/project_commands.py
|
LevyForchh/calm-dsl
|
ff6e021628c0ef8c04aaa5e37c80fe1fbff729e6
|
[
"Apache-2.0"
] | null | null | null |
calm/dsl/cli/project_commands.py
|
LevyForchh/calm-dsl
|
ff6e021628c0ef8c04aaa5e37c80fe1fbff729e6
|
[
"Apache-2.0"
] | 20
|
2020-06-30T01:00:36.000Z
|
2021-03-23T01:03:39.000Z
|
calm/dsl/cli/project_commands.py
|
LevyForchh/calm-dsl
|
ff6e021628c0ef8c04aaa5e37c80fe1fbff729e6
|
[
"Apache-2.0"
] | null | null | null |
import click
from .projects import (
get_projects,
delete_project,
create_project,
describe_project,
update_project,
)
from .main import create, get, update, delete, describe
from calm.dsl.tools import get_logging_handle
from calm.dsl.builtins import read_spec
LOG = get_logging_handle(__name__)
@get.command("projects")
@click.option("--name", "-n", default=None, help="Search for projects by name")
@click.option(
"--filter", "filter_by", "-f", default=None, help="Filter projects by this string"
)
@click.option("--limit", "-l", default=20, help="Number of results to return")
@click.option(
"--offset", "-o", default=0, help="Offset results by the specified amount"
)
@click.option(
"--quiet", "-q", is_flag=True, default=False, help="Show only project names"
)
def _get_projects(name, filter_by, limit, offset, quiet):
"""Get projects, optionally filtered by a string"""
get_projects(name, filter_by, limit, offset, quiet)
def create_project_from_file(file_location, project_name):
project_payload = read_spec(file_location)
if project_name:
project_payload["project_detail"]["name"] = project_name
return create_project(project_payload)
@create.command("project")
@click.option(
"--file",
"-f",
"project_file",
type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
help="Path of Project file to upload",
required=True,
)
@click.option(
"--name", "-n", "project_name", type=str, default="", help="Project name(optional)"
)
def _create_project(project_file, project_name):
"""Creates a project"""
if project_file.endswith(".json") or project_file.endswith(".yaml"):
res, err = create_project_from_file(project_file, project_name)
else:
LOG.error("Unknown file format")
return
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
project = res.json()
state = project["status"]["state"]
LOG.info("Project state: {}".format(state))
@delete.command("project")
@click.argument("project_names", nargs=-1)
def _delete_project(project_names):
"""Deletes a project"""
delete_project(project_names)
@describe.command("project")
@click.argument("project_name")
def _describe_project(project_name):
"""Describe a project"""
describe_project(project_name)
@update.command("project")
@click.argument("project_name")
@click.option(
"--file",
"-f",
"project_file",
type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
help="Path of Project file to upload",
required=True,
)
def _update_project(project_name, project_file):
"""Updates a project"""
if project_file.endswith(".json") or project_file.endswith(".yaml"):
payload = read_spec(project_file)
res, err = update_project(project_name, payload)
else:
LOG.error("Unknown file format")
return
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
project = res.json()
state = project["status"]["state"]
LOG.info("Project state: {}".format(state))
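# --- Editor's usage sketch (not part of the original file) ---
# Hedged examples of invoking the click commands above, assuming the package
# exposes a `calm` console entry point; file and project names are placeholders:
#   calm get projects --limit 10
#   calm create project -f project.yaml -n demo_project
#   calm describe project demo_project
#   calm update project demo_project -f project.yaml
#   calm delete project demo_project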
| 27.391304
| 87
| 0.677143
|
28f1c46d8de7e0612b8de14e4ddd32023aa14fa6
| 2,587
|
py
|
Python
|
orgjunk.py
|
genlikan/lazy-junk-organizer
|
5f833b3343c885c680a93463c90c6d5c18d7bbf7
|
[
"MIT"
] | null | null | null |
orgjunk.py
|
genlikan/lazy-junk-organizer
|
5f833b3343c885c680a93463c90c6d5c18d7bbf7
|
[
"MIT"
] | null | null | null |
orgjunk.py
|
genlikan/lazy-junk-organizer
|
5f833b3343c885c680a93463c90c6d5c18d7bbf7
|
[
"MIT"
] | null | null | null |
"""
-*- coding: utf-8 -*-
========================
Python Lazy Junk Files Organizer
========================
========================
"""
import os
from pathlib import Path
DIRECTORIES = {
"HTML": [".html5", ".html", ".htm", ".xhtml"],
"IMAGES": [".jpeg", ".jpg", ".tiff", ".gif", ".bmp", ".png", ".bpg", ".svg",
".heif", ".psd"],
"VIDEOS": [".avi", ".flv", ".wmv", ".mov", ".mp4", ".webm", ".vob", ".mng",
".qt", ".mpg", ".mpeg", ".3gp"],
"DOCUMENTS": [".oxps", ".epub", ".pages", ".docx", ".doc", ".fdf", ".ods",
".odt", ".pwi", ".xsn", ".xps", ".dotx", ".docm", ".dox",
".rvg", ".rtf", ".rtfd", ".wpd", ".xls", ".xlsx", ".ppt",
".pptx"],
"ARCHIVES": [".a", ".ar", ".cpio", ".iso", ".tar", ".gz", ".rz", ".7z",
".dmg", ".rar", ".xar", ".zip"],
"AUDIO": [".aac", ".aa", ".aac", ".dvf", ".m4a", ".m4b", ".m4p", ".mp3",
".msv", ".ogg", ".oga", ".raw", ".vox", ".wav", ".wma"],
"PLAINTEXT": [".txt", ".in", ".out"],
"PDF": [".pdf"],
"PYTHON": [".py"],
"XML": [".xml"],
"EXE": [".exe"],
"SHELL": [".sh"],
"C++": [".cpp"],
"C": [".c"],
"ASP Classic": [".asp"],
"ASP_NET": [".aspx", ".axd", ".asx", ".asmx", ".ashx"],
"CSS": [".css"],
"Coldfusion": [".cfm"],
"Erlang": [".yaws"],
"Flash": [".swf"],
"Java": [".jsp", ".jspx", ".wss", ".do", ".action"],
"JavaScript": [".js"],
"Perl": [".pl"],
"PHP": [".php", ".php4", ".php3", ".phtml"],
"Ruby": [".rb", ".rhtml"],
"SSI": [".shtml"],
"XML": [".xml", ".rss", ".svg"]
}
FILE_FORMATS = {file_format: directory
for directory, file_formats in DIRECTORIES.items()
for file_format in file_formats}
def organize_junk():
for entry in os.scandir():
if entry.is_dir():
continue
file_path = Path(entry)
file_format = file_path.suffix.lower()
if file_format in FILE_FORMATS:
directory_path = Path(FILE_FORMATS[file_format])
directory_path.mkdir(exist_ok=True)
file_path.rename(directory_path.joinpath(file_path))
try:
os.mkdir("OTHER-FILES")
except:
pass
for dir in os.scandir():
try:
if dir.is_dir():
os.rmdir(dir)
else:
os.rename(os.getcwd() + '/' + str(Path(dir)), os.getcwd() + '/OTHER-FILES/' + str(Path(dir)))
except:
pass
if __name__ == "__main__":
organize_junk()
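# --- Editor's usage sketch (not part of the original file) ---
# Hedged example: organize_junk() operates on the current working directory, so
# a caller can switch to another folder first; the path below is a placeholder.
def organize_directory(path):
    os.chdir(path)
    organize_junk()
# organize_directory("/path/to/Downloads")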
| 30.797619
| 109
| 0.424043
|
473a9d8511b59239abbcdf62d14c36b1ce35f3e4
| 38
|
py
|
Python
|
helper/__init__.py
|
jyotint/aws-python
|
6f9eec2c259a0298eba769e510b9b294c8b69815
|
[
"Apache-2.0"
] | null | null | null |
helper/__init__.py
|
jyotint/aws-python
|
6f9eec2c259a0298eba769e510b9b294c8b69815
|
[
"Apache-2.0"
] | null | null | null |
helper/__init__.py
|
jyotint/aws-python
|
6f9eec2c259a0298eba769e510b9b294c8b69815
|
[
"Apache-2.0"
] | null | null | null |
# import constants
# import timeHelper
| 19
| 19
| 0.815789
|
650d7209e43b92463d32f1db8cc7e3565b9885c0
| 917
|
py
|
Python
|
meraki_sdk/models/mode_2_enum.py
|
meraki/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 37
|
2019-04-24T14:01:33.000Z
|
2022-01-28T01:37:21.000Z
|
meraki_sdk/models/mode_2_enum.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 10
|
2019-07-09T16:35:11.000Z
|
2021-12-07T03:47:53.000Z
|
meraki_sdk/models/mode_2_enum.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 17
|
2019-04-30T23:53:21.000Z
|
2022-02-07T22:57:44.000Z
|
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Mode2Enum(object):
"""Implementation of the 'Mode2' enum.
Either 'renew' or 'addDevices'. 'addDevices' will increase the license
limit, while 'renew' will extend the amount of time until expiration. This
parameter is legacy and only applies to coterm licensing; it should not be
specified when claiming per-device licenses. Please see <a target='_blank'
href='https://documentation.meraki.com/zGeneral_Administration/Licensing/Ad
ding_an_Enterprise_license_to_an_existing_Dashboard_account'>this
article</a> for more information.
Attributes:
ADDDEVICES: TODO: type description here.
RENEW: TODO: type description here.
"""
ADDDEVICES = 'addDevices'
RENEW = 'renew'
| 29.580645
| 95
| 0.684842
|
a7e70ad48aa58c241515eec6acf8db52bfc1f641
| 2,980
|
py
|
Python
|
pipeline.py
|
maciekszul/freesurfer_proc
|
37ca127cbf04766e07548448729da56888697d55
|
[
"BSD-3-Clause"
] | null | null | null |
pipeline.py
|
maciekszul/freesurfer_proc
|
37ca127cbf04766e07548448729da56888697d55
|
[
"BSD-3-Clause"
] | null | null | null |
pipeline.py
|
maciekszul/freesurfer_proc
|
37ca127cbf04766e07548448729da56888697d55
|
[
"BSD-3-Clause"
] | null | null | null |
from tools import files
import os.path as op
import json
import argparse
import subprocess as sp
json_file = "pipeline_params.json"
# argparse input
des = "pipeline script"
parser = argparse.ArgumentParser(description=des)
parser.add_argument(
"-f",
type=str,
default=json_file,
help="JSON file with pipeline parameters"
)
parser.add_argument(
"-n",
type=int,
help="id list index"
)
args = parser.parse_args()
params = vars(args)
json_file = str(params["f"])
subj_index = params["n"]
print(json_file)
# read the pipeline params
with open(json_file) as pipeline_file:
pipeline_params = json.load(pipeline_file)
raw_path = pipeline_params["RAW_PATH"]
t1_path = pipeline_params["T1_PATH"]
fs_path = pipeline_params["FS_PATH"]
raw_subjects = files.get_folders_files(
raw_path,
wp=False
)[0]
raw_subjects.sort()
raw_subj = raw_subjects[subj_index]
raw_subj_dir = op.join(
raw_path,
raw_subj,
"scans",
"2_t1_mprage_sag_iso_1mm"
)
t1_subj_dir = op.join(
t1_path,
raw_subj
)
files.make_folder(t1_subj_dir)
def dcm2nii_func(t1_subj_dir, raw_subj_dir):
dcm2nii = "/cubric/software/mricron/dcm2nii"
try:
t1_file = files.get_files(
t1_subj_dir,
"co",
"nii.gz"
)[2][0]
print(raw_subj, op.exists(t1_file), "T1 image exists")
except:
sp.call([
dcm2nii,
"-4", "y",
"-a", "n",
"-c", "n",
"-d", "n",
"-e", "n",
"-f", "n",
"-g", "y",
"-i", "n",
"-o", t1_subj_dir,
"-p", "n",
"-r", "y",
"-v", "y",
"-x", "y",
raw_subj_dir
])
if pipeline_params["t1_conversion"]:
dcm2nii_func(t1_subj_dir, raw_subj_dir)
def recon_all_func(fs_path, raw_subj):
try:
t1_file = files.get_files(
t1_subj_dir,
"co",
"nii.gz"
)[2][0]
sp.call([
"recon-all",
"-i", t1_file,
"-subjid", raw_subj,
"-sd", fs_path,
"-all"
])
except:
print(raw_subj, "No T1 file for this participant")
fs_subj_dir = op.join(
fs_path,
raw_subj
)
if pipeline_params["recon_all"] and not op.exists(fs_subj_dir):
recon_all_func(fs_path, raw_subj)
def bem_watershed_func(fs_path, raw_subj):
sp.call([
"mne",
"watershed_bem",
"-s", raw_subj,
"-d", fs_path
])
if pipeline_params["bem_watershed"] and op.exists(fs_subj_dir):
bem_watershed_func(fs_path, raw_subj)
def make_scalp_surface_func(fs_path, raw_subj):
sp.call([
"mne",
"make_scalp_surfaces",
"-s", raw_subj,
"-d", fs_path,
"-f", "True"
])
if pipeline_params["make_scalp_surface"] and op.exists(fs_subj_dir):
make_scalp_surface_func(fs_path, raw_subj)
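# --- Editor's usage sketch (not part of the original file) ---
# Hedged example of invoking this script; paths are placeholders, and the JSON
# keys mirror the ones read above:
#   python pipeline.py -f pipeline_params.json -n 0
# pipeline_params.json:
#   {
#     "RAW_PATH": "/path/to/raw",
#     "T1_PATH": "/path/to/t1",
#     "FS_PATH": "/path/to/freesurfer_subjects",
#     "t1_conversion": true,
#     "recon_all": true,
#     "bem_watershed": true,
#     "make_scalp_surface": true
#   }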
| 20.985915
| 68
| 0.57047
|
3358e0363acb1277a40adecca74fd2b21f5061d7
| 4,500
|
py
|
Python
|
python/ee/tests/featurecollection_test.py
|
gninglaity586/gningGIS
|
f0424abe81f1495797d9c45b924e8ea04f24efd5
|
[
"Apache-2.0"
] | 1
|
2021-01-28T01:45:40.000Z
|
2021-01-28T01:45:40.000Z
|
python/ee/tests/featurecollection_test.py
|
liangliang12/earthengine-api
|
f0424abe81f1495797d9c45b924e8ea04f24efd5
|
[
"Apache-2.0"
] | null | null | null |
python/ee/tests/featurecollection_test.py
|
liangliang12/earthengine-api
|
f0424abe81f1495797d9c45b924e8ea04f24efd5
|
[
"Apache-2.0"
] | 3
|
2017-08-25T05:12:46.000Z
|
2019-06-28T07:17:03.000Z
|
#!/usr/bin/env python
"""Test for the ee.featurecollection module."""
import mock
import unittest
import ee
from ee import apitestcase
class FeatureCollectionTestCase(apitestcase.ApiTestCase):
def testConstructors(self):
"""Verifies that constructors understand valid parameters."""
from_id = ee.FeatureCollection('abcd')
self.assertEqual(
ee.ApiFunction.lookup('Collection.loadTable'), from_id.func)
self.assertEqual({'tableId': 'abcd'}, from_id.args)
from_id_and_geom_column = ee.FeatureCollection('abcd', 'xyz')
self.assertEqual(
ee.ApiFunction.lookup('Collection.loadTable'),
from_id_and_geom_column.func)
self.assertEqual({
'tableId': 'abcd',
'geometryColumn': 'xyz'
}, from_id_and_geom_column.args)
geometry = ee.Geometry.Point(1, 2)
feature = ee.Feature(geometry)
geo_json = {'type': 'FeatureCollection', 'features': [geometry.toGeoJSON()]}
from_geometries = ee.FeatureCollection([geometry])
from_single_geometry = ee.FeatureCollection(geometry)
from_features = ee.FeatureCollection([feature])
from_single_feature = ee.FeatureCollection(feature)
from_geo_json = ee.FeatureCollection(geo_json)
self.assertEqual(from_geometries, from_single_geometry)
self.assertEqual(from_geometries, from_features)
self.assertEqual(from_geometries, from_single_feature)
self.assertEqual(from_geometries, from_geo_json)
self.assertEqual(ee.ApiFunction.lookup('Collection'), from_geometries.func)
self.assertEqual({'features': [feature]}, from_geometries.args)
# Test a computed list object.
l = ee.List([feature]).slice(0)
from_list = ee.FeatureCollection(l)
self.assertEqual({'features': l}, from_list.args)
from_computed_object = ee.FeatureCollection(
ee.ComputedObject(None, {'x': 'y'}))
self.assertEqual({'x': 'y'}, from_computed_object.args)
def testGetMapId(self):
"""Verifies that getMap() uses Collection.draw to draw."""
collection = ee.FeatureCollection('test5')
mapid = collection.getMapId({'color': 'ABCDEF'})
manual = ee.ApiFunction.call_('Collection.draw', collection, 'ABCDEF')
self.assertEqual('fakeMapId', mapid['mapid'])
self.assertEqual(manual, mapid['image'])
def testDownload(self):
"""Verifies that Download ID and URL generation."""
ee.FeatureCollection('test7').getDownloadURL('csv')
self.assertEqual('/table', self.last_table_call['url'])
self.assertEqual(ee.FeatureCollection('test7').serialize(),
self.last_table_call['data']['table'].serialize())
ee.FeatureCollection('test8').getDownloadURL(
'json', 'bar, baz', 'qux')
self.assertEqual(
ee.FeatureCollection('test8').serialize(),
self.last_table_call['data']['table'].serialize())
self.assertEqual('JSON', self.last_table_call['data']['format'])
self.assertEqual('bar, baz', self.last_table_call['data']['selectors'])
self.assertEqual('qux', self.last_table_call['data']['filename'])
self.assertEqual(
ee.FeatureCollection('test7').getDownloadUrl('csv'),
ee.FeatureCollection('test7').getDownloadURL('csv'))
def testDownloadTableWithCloudApi(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
create_table_response = {'name': 'table_name'}
cloud_api_resource.projects().tables().create().execute.return_value = (
create_table_response)
fc = ee.FeatureCollection([ee.Feature(None, {'foo': 'bar'})])
result = ee.data.getTableDownloadId({
'table': fc, 'selectors': 'foo', 'format': 'CSV',
})
url = ee.data.makeTableDownloadUrl(result)
self.assertDictEqual(result, {'docid': '5', 'token': '6'})
self.assertEqual(url, '/v1alpha/5:getFeatures')
def testSelect(self):
def equals(c1, c2):
self.assertEqual(c1.serialize(), c2.serialize())
fc = ee.FeatureCollection(ee.Feature(ee.Geometry.Point(0, 0), {'a': 5}))
equals(fc.select('a'), fc.select(['a']))
equals(fc.select('a', 'b'), fc.select(['a', 'b']))
equals(fc.select('a', 'b', 'c'), fc.select(['a', 'b', 'c']))
equals(fc.select('a', 'b', 'c', 'd'), fc.select(['a', 'b', 'c', 'd']))
equals(fc.select(['a']), fc.select(['a'], None, True))
equals(fc.select(['a'], None, False),
fc.select(propertySelectors=['a'], retainGeometry=False))
if __name__ == '__main__':
unittest.main()
| 38.461538
| 80
| 0.676667
|
f3159321d7b74ed2ee361bdf753593813ae10c35
| 3,631
|
py
|
Python
|
REST_Countries/unit_tests.py
|
Jay4C/Cloud_Computing
|
74d77acd77eae7123948441a85e7e6b570d21bc2
|
[
"MIT"
] | null | null | null |
REST_Countries/unit_tests.py
|
Jay4C/Cloud_Computing
|
74d77acd77eae7123948441a85e7e6b570d21bc2
|
[
"MIT"
] | null | null | null |
REST_Countries/unit_tests.py
|
Jay4C/Cloud_Computing
|
74d77acd77eae7123948441a85e7e6b570d21bc2
|
[
"MIT"
] | null | null | null |
import unittest
import requests
class UnitTestsRestCountriesAPI(unittest.TestCase):
def test_all(self):
url = "https://restcountries.eu/rest/v2/all"
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_name(self):
name = "france"
url = "https://restcountries.eu/rest/v2/name/" + name
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_full_name(self):
full_name = "aruba"
url = "https://restcountries.eu/rest/v2/name/" + full_name + "?fullText=true"
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_code(self):
code = "co"
url = "https://restcountries.eu/rest/v2/alpha/" + code
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_list_of_codes(self):
codes = "col;no;ee"
url = "https://restcountries.eu/rest/v2/alpha?codes=" + codes
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_currency(self):
currency = "cop"
url = "https://restcountries.eu/rest/v2/currency/" + currency
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_language(self):
language = "es"
url = "https://restcountries.eu/rest/v2/lang/" + language
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_capital_city(self):
capital_city = "tallinn"
url = "https://restcountries.eu/rest/v2/capital/" + capital_city
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_calling_code(self):
calling_code = "372"
url = "https://restcountries.eu/rest/v2/callingcode/" + calling_code
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_region(self):
region = "europe"
url = "https://restcountries.eu/rest/v2/region/" + region
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_regional_bloc(self):
regional_bloc = "eu"
url = "https://restcountries.eu/rest/v2/regionalbloc/" + regional_bloc
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
def test_filter_response(self):
filters = "name;capital;currencies"
url = "https://restcountries.eu/rest/v2/all?fields=" + filters
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text.encode('utf8'))
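# --- Editor's usage sketch (not part of the original file) ---
# Hedged example of how one of the checks above could assert on the response
# instead of printing it; it assumes the v2 service is reachable and exposes
# the 'alpha2Code' field documented for that API version.
class UnitTestsRestCountriesAPIWithAssertions(unittest.TestCase):
    def test_name_returns_france(self):
        response = requests.get("https://restcountries.eu/rest/v2/name/france")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()[0]["alpha2Code"], "FR")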
if __name__ == '__main__':
unittest.main()
| 34.913462
| 85
| 0.599835
|
0941c4a1e2cdc1cbaf15f8a870e228b60430fd75
| 671
|
py
|
Python
|
Algorithms/Implementation/DesignerPDFViewer.py
|
gelaim/HackerRank
|
8b0e51d064a390f6112d54166942db440c88b579
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/DesignerPDFViewer.py
|
gelaim/HackerRank
|
8b0e51d064a390f6112d54166942db440c88b579
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/DesignerPDFViewer.py
|
gelaim/HackerRank
|
8b0e51d064a390f6112d54166942db440c88b579
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'designerPdfViewer' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER_ARRAY h
# 2. STRING word
#
def designerPdfViewer(h, word):
# Write your code here
result = 0
for i in word:
result = max(result, h[ord(i)-97])
return result*len(word)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close()
| 18.135135
| 50
| 0.652757
|
4617eebdfaa536d847ace94707c6c33e45489645
| 6,164
|
py
|
Python
|
topintersections/intersection_ui.py
|
uberparagon/mgn
|
87eacb93177c9d41edb525bb71ae03ae45f18d14
|
[
"MIT"
] | null | null | null |
topintersections/intersection_ui.py
|
uberparagon/mgn
|
87eacb93177c9d41edb525bb71ae03ae45f18d14
|
[
"MIT"
] | 7
|
2020-03-17T18:07:48.000Z
|
2021-03-19T12:54:49.000Z
|
topintersections/intersection_ui.py
|
uberparagon/mgn
|
87eacb93177c9d41edb525bb71ae03ae45f18d14
|
[
"MIT"
] | 1
|
2020-02-20T09:28:51.000Z
|
2020-02-20T09:28:51.000Z
|
"""
This file implements the user interface. This is the file that should be loaded by the user. It will import everything you need.
"""
from __future__ import absolute_import
#from six.moves.cPickle import dump, load, UnpicklingError
import pickle
try:
from .intersection7 import *
from .TautRing3 import Mgn
from sage.all import *
except ImportError:
pass
default_file = "mgn_top_intersect.dat"
import sys
#if len(sys.argv) >= 2 and sys.argv[1] == "-i": #Print this message if you load from interactive sage.
if False: #don't print this message any more
print("""***************************************************************************
Welcome to the intersection number computer for the moduli space of curves!
Written by Drew Johnson, based on Carel Faber's exposition and Maple code.
The main commands are "intnum" to compute the intersection number, and
"space" to select the space. Type help(intnum) or help(space) for more
information.
Type save_data("filename") to save the results computed this session and
load_data("filname") to retrieve previously saved data. The default filename is
"{0}" if not specified.
Ctrl-D to quit.
***************************************************************************""".format(default_file))
current_space = None
def space(g,n, namespace = None, print_classes = True):
"""
INPUT:
- ``g`` -- The genus.
- ``n`` -- The number of marked points.
- ``print_classes`` -- (optional, defaults to True) Set this to False if you don't want to see a list of the classes
This performs three functions:
1. It prints a list of classes and indexes for easy reference.
    2. It sets the default space for ``intnum``.
3. It injects the variables names into your namespace so you can use them to make polynomials.
It also returns a Moduli space object, if you want to capture it.
"""
global current_space
current_space = Mgn(g,n,True)
if print_classes:
current_space.rij()
#exprwdata.ExprWithData.reset()
return current_space
def intnum(*args, **keywrds):
r"""
A convenience method for the user to access the intersection number code. There are several accepted syntaxes.
INPUT:
- ``p`` -- a polynomial in variables that can be interpreted as classes on `\mathcal M_{g,n}`. You can inject variables into the global namespace using the ``space(genus, n)`` function. (Use ``space(genus, n, globals())`` if you imported this module instead of loading it.) The intersection will be computed on the space specified by the most recent ``space`` call.
INPUT:
- ``l`` -- a list of indexes into the classes on `\mathcal M_{g,n}`. The intersection will be computed on the space specified by the most recent ``space`` call.
INPUT:
- ``genus``
- ``n`` -- number of marked points
- ``p`` -- a polynomial in variables that can be interpreted as classes on `\mathcal M_{g,n}`. This method call will interpret the variables on the space you specify, NOT on the space specified by the most recent ``space`` call. You may have to call ``space``, however, to ensure that the variables you want to use are defined.
INPUT:
- ``genus``
- ``n`` -- number of marked points
- ``l`` -- a list of indexes into the classes on `\mathcal M_{g,n}`. These indexes are displayed for you when you use the ``space(genus, n)`` command, but it is not necessary to call ``space`` before using this syntax.
"""
global current_space
M = keywrds.get("space")
if M != None:
if len(args) != 1:
print("If you specify the space, you need only one argument!")
if isinstance(args[0], list):
p = prod((M[i] for i in args[0]))
else:
p = change_space(args[0], M)
elif len(args) == 1:
        if current_space is None:
print('Please specify the genus and number of marked points as the first arguments, or set the default space you wish to work over using the "space" function')
return
M = current_space
if isinstance(args[0], list):
p = prod((M[i] for i in args[0]))
else:
p = args[0]
elif len(args) == 3:
M = Mgn(args[0], args[1])
if isinstance(args[2], list):
p = prod((M[i] for i in args[2]))
else:
p = change_space(args[2],M)
else:
print("Syntax incorrect, please see docstring (type ``help(intnum)``)")
return
if keywrds.get("confirm", True):
print("Computing the intersection of {0} over {1}...".format(repr(p), repr(M)))
try:
return intersect([M],p, keywrds.get("check_degree", True))
except BadDegreeException as excpt:
print(excpt)
def change_space(p, space):
result = 0
for m, coeff in p.monomials():
result += coeff * prod([cl.change_space(space)**expon for cl, expon in m.decompose_monomial()])
return result
def save_data(filename = default_file, prompt = True):
global master_table
import os
if prompt and os.path.exists(filename):
confirm = input("Overwrite existing file " + filename + " (y for yes)? ")
if confirm.lower() != "y":
print("Save aborted.")
return
with open(filename, "wb") as f:
try:
pickle.dump(master_table, f, protocol = 2)
except Exception as ex:
print(ex)
return
print("Save suceeded.")
def load_data(filename = default_file):
global master_table
try:
f = open(filename, "rb")
try:
master_table.update(pickle.load(f))
except pickle.UnpicklingError:
print("Problem loading data... perhaps the data is corrupted or not in the right format?")
return
finally:
f.close()
except IOError:
print("Could not load file. Does the file exist?")
else:
print("Data loaded.")
| 37.357576
| 372
| 0.609507
|
083aeb2563bc391f69079e59851bb405b0b8a926
| 4,264
|
py
|
Python
|
server/tracking.py
|
MrRedbloX/SmartphoneToVirtuality
|
287e0677a12cf1f13ae18a9d5f7354ec20681060
|
[
"MIT"
] | null | null | null |
server/tracking.py
|
MrRedbloX/SmartphoneToVirtuality
|
287e0677a12cf1f13ae18a9d5f7354ec20681060
|
[
"MIT"
] | null | null | null |
server/tracking.py
|
MrRedbloX/SmartphoneToVirtuality
|
287e0677a12cf1f13ae18a9d5f7354ec20681060
|
[
"MIT"
] | null | null | null |
import cv2
from VideoCaptureAsync import VideoCaptureAsync
import numpy as np
from math import *
from operator import and_,truth,add
from functools import reduce
from itertools import *
from more_itertools import *
import argparse
from client import ODUdp
from time import time
from reconstruction import Reconstructor
parser = argparse.ArgumentParser(description='Track the highest value point in videos.')
parser.add_argument('--inputs', type=str, nargs='+',default=['/dev/video{}'.format(x) for x in [0,2,4]])
parser.add_argument('--params', type=str, nargs='+')
parser.add_argument('--shrink', type=int,default=1)
parser.add_argument('--width',type=int,default=640)
parser.add_argument('--height',type=int,default=360)
parser.add_argument('--framerate',type=int,default=20)
parser.add_argument('--xtile',type=int,default=1)
parser.add_argument('--oversample',type=int,default=4)
parser.add_argument('--mask',type=int,default=16)
parser.add_argument('--ip',type=str,default="localhost")
parser.add_argument('--port',type=int,default=4269)
args = parser.parse_args()
print(args)
odudp = ODUdp(args.ip, args.port)
reconstructor = Reconstructor([np.load(path) for path in args.params])
tflip = lambda a,b : (b,a)
shrink = lambda h,w : (int(h/args.shrink),int(w/args.shrink))
print(shrink(args.height,args.width))
shrinkFrame = lambda frame : cv2.resize(frame,tflip(*shrink(*frame.shape[:2])))
fillvalue = np.zeros((*shrink(args.height,args.width),3),dtype=np.uint8)
VCA = lambda input: VideoCaptureAsync(input,args.width,args.height,args.framerate,args.oversample)
caps = [VCA(input) for input in args.inputs]
for cap in caps:
cap.start()
# build the hitboxes for each view
hbx = []
for y in range(ceil(len(caps)/2)):
for x in range(args.xtile):
hbx.append((x*args.width,y*args.height))
# removes empty views
hbx = hbx[:len(caps)]
print(hbx)
lasts = [None] * len(caps)
def dist_mask(shape,center):
X, Y = np.ogrid[:shape[0], :shape[1]]
return np.sqrt((X - center[0])**2 + (Y-center[1])**2)
def marker(frame,point,z=0,size=2,value=0):
frame[dist_mask(frame.shape,point) <= size] = value
def register(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN or event == cv2.EVENT_RBUTTONDOWN:
for box,i in zip(param,range(len(param))):
bx,by = shrink(*box)
w,h = shrink(args.width,args.height)
# inside rectangle
if bx<x<(bx+w) and by<y<(by+h):
if event == cv2.EVENT_LBUTTONDOWN:
# remap window coordinate to data ones
lasts[i] = (args.shrink*(y-by),args.shrink*(x-bx) )
if event == cv2.EVENT_RBUTTONDOWN:
lasts[i] = None
def deform(frame):
return np.sum(frame,axis=-1) / (255.0 * 3)
def find(frame,last):
dmask = dist_mask(frame.shape,last)
cmask = dmask <= args.mask
hyp = np.max(dmask)
    # Absolutely limits the distance between two consecutive matches
frame[~cmask] = 0
# Makes the pixel values proportional to their distance from the last match
frame -= dmask/hyp
# Take the highest one
return np.unravel_index(frame.argmax(),frame.shape)
# Read the first frame of each capture
acks,frames = list(zip(*(cap.read() for cap in caps)))
while reduce(and_,acks):
displayed = [None] * len(caps)
# Read frames
acks,frames = zip(*(cap.read() for cap in caps))
for i, frame, last in zip( count(), frames, lasts ):
displayed[i] = shrinkFrame(frame.copy())
if last:
# Find the new best match
lasts[i] = find(deform(frame),last)
marker(displayed[i],(shrink(*lasts[i])),size=5,value=(255,0,0))
# Finds 3D position from tracked image points
if reduce(add,map(truth,lasts)) >= 2:
position = reconstructor.reconstruct(*lasts)
if position.any():
odudp.sendto(odudp.get_byte_from(*position))
# Puts the images in a grid
compose = np.vstack([np.hstack(group) for group in grouper(displayed,args.xtile,fillvalue)])
cv2.imshow('frame',compose)
cv2.setMouseCallback('frame',register,param=hbx)
    if mask := cv2.waitKey(1) & 0xFF:
if mask == ord('q'):
break
for cap in caps:
cap.stop()
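# --- Editor's usage sketch (not part of the original file) ---
# Hedged example of invoking this script; device nodes, calibration .npy files,
# and the UDP endpoint are placeholders matching the argparse options above:
#   python tracking.py --inputs /dev/video0 /dev/video2 \
#       --params cam0.npy cam1.npy \
#       --ip localhost --port 4269 --shrink 2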
| 31.820896
| 104
| 0.665338
|
1b8fdc440f69c2eeab595e914ac2c8e98a97b725
| 20,052
|
py
|
Python
|
vispy/app/canvas.py
|
MatthieuDartiailh/vispy
|
09d429be361a148b0614a192f56d4070c624072c
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/app/canvas.py
|
MatthieuDartiailh/vispy
|
09d429be361a148b0614a192f56d4070c624072c
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/app/canvas.py
|
MatthieuDartiailh/vispy
|
09d429be361a148b0614a192f56d4070c624072c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division, print_function
import numpy as np
from ._default_app import default_app
from ..util.event import EmitterGroup, Event, WarningEmitter
from ..util.ptime import time
from ..ext.six import string_types
from .application import Application
from ._config import get_default_config
# todo: add functions for asking about current mouse/keyboard state
# todo: add hover enter/exit events
# todo: add focus events
def _gloo_initialize(event):
from ..gloo import gl_initialize
gl_initialize()
class Canvas(object):
"""Representation of a GUI element with an OpenGL context
Receives the following events:
initialize, resize, draw, mouse_press, mouse_release, mouse_move,
mouse_wheel, key_press, key_release, stylus, touch, close
Parameters
----------
title : str
The widget title
size : (width, height)
The size of the window.
position : (x, y)
The position of the window in screen coordinates.
show : bool
Whether to show the widget immediately. Default False.
autoswap : bool
Whether to swap the buffers automatically after a draw event.
Default True. If True, the ``swap_buffers`` Canvas method will
be called last (by default) by the ``canvas.draw`` event handler.
app : Application | str
Give vispy Application instance to use as a backend.
(vispy.app is used by default.) If str, then an application
using the chosen backend (e.g., 'pyglet') will be created.
Note the canvas application can be accessed at ``canvas.app``.
create_native : bool
Whether to create the widget immediately. Default True.
init_gloo : bool
Initialize standard values in gloo (e.g., ``GL_POINT_SPRITE``).
vsync : bool
Enable vertical synchronization.
resizable : bool
Allow the window to be resized.
decorate : bool
Decorate the window.
fullscreen : bool | int
If False, windowed mode is used (default). If True, the default
monitor is used. If int, the given monitor number is used.
context : dict | instance SharedContext | None
OpenGL configuration to use when creating the context for the canvas,
or a context to share. If None, ``vispy.app.get_default_config`` will
be used to set the OpenGL context parameters. Alternatively, the
``canvas.context`` property from an existing canvas (using the
same backend) will return a ``SharedContext`` that can be used,
thereby sharing the existing context.
close_keys : str | list of str
Key to use that will cause the canvas to be closed.
"""
def __init__(self, title='Vispy canvas', size=(800, 600), position=None,
show=False, autoswap=True, app=None, create_native=True,
init_gloo=True, vsync=False, resizable=True, decorate=True,
fullscreen=False, context=None, close_keys=()):
size = [int(s) for s in size]
if len(size) != 2:
raise ValueError('size must be a 2-element list')
title = str(title)
if not isinstance(fullscreen, (bool, int)):
raise TypeError('fullscreen must be bool or int')
if context is None:
context = get_default_config()
# Initialize some values
self._autoswap = autoswap
self._title = title
self._frame_count = 0
self._fps = 0
self._basetime = time()
self._fps_callback = None
self._backend = None
# Create events
self.events = EmitterGroup(source=self,
initialize=Event,
resize=ResizeEvent,
draw=DrawEvent,
mouse_press=MouseEvent,
mouse_release=MouseEvent,
mouse_move=MouseEvent,
mouse_wheel=MouseEvent,
key_press=KeyEvent,
key_release=KeyEvent,
stylus=Event,
touch=Event,
close=Event)
# Deprecated paint emitter
emitter = WarningEmitter('Canvas.events.paint and Canvas.on_paint are '
'deprecated; use Canvas.events.draw and '
'Canvas.on_draw instead.',
source=self, type='draw',
event_class=DrawEvent)
self.events.add(paint=emitter)
self.events.draw.connect(self.events.paint)
# Initialize gloo settings
if init_gloo:
self.events.initialize.connect(_gloo_initialize,
ref='gloo_initialize')
# store arguments that get set on Canvas init
kwargs = dict(title=title, size=size, position=position, show=show,
vsync=vsync, resizable=resizable, decorate=decorate,
fullscreen=fullscreen, context=context)
self._backend_kwargs = kwargs
# Get app instance
if isinstance(app, string_types):
app = Application(app)
self._app = default_app if app is None else app
# Create widget now
if create_native:
self.create_native()
# Close keys
def close_keys_check(event):
if event.key in self.close_keys:
self.close()
if isinstance(close_keys, string_types):
close_keys = [close_keys]
self.close_keys = close_keys
self.events.key_press.connect(close_keys_check, ref=True)
def create_native(self):
""" Create the native widget if not already done so. If the widget
is already created, this function does nothing.
"""
if self._backend is not None:
return
# Make sure that the app is active
self._app.use()
assert self._app.native
# Instantiate the backend with the right class
be = self._app.backend_module.CanvasBackend(**self._backend_kwargs)
self._set_backend(be)
def _set_backend(self, backend):
""" Set backend<->canvas references and autoswap
"""
assert backend is not None # should never happen
self._backend = backend
self._backend._vispy_canvas = self
if self._autoswap:
# append to the end
self.events.draw.connect((self, 'swap_buffers'),
ref=True, position='last')
@property
def context(self):
""" The OpenGL context of the native widget
"""
return self._backend._vispy_context
@property
def app(self):
""" The vispy Application instance on which this Canvas is based.
"""
return self._app
@property
def native(self):
""" The native widget object on which this Canvas is based.
"""
return self._backend._vispy_get_native_canvas()
def connect(self, fun):
""" Connect a function to an event. The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
        This method is typically used as a decorator on a function
definition for an event handler.
"""
# Get and check name
name = fun.__name__
if not name.startswith('on_'):
raise ValueError('When connecting a function based on its name, '
'the name should start with "on_"')
eventname = name[3:]
# Get emitter
try:
emitter = self.events[eventname]
except KeyError:
raise ValueError(
'Event "%s" not available on this canvas.' %
eventname)
# Connect
emitter.connect(fun)
# ---------------------------------------------------------------- size ---
@property
def size(self):
""" The size of canvas/window """
return self._backend._vispy_get_size()
@size.setter
def size(self, size):
return self._backend._vispy_set_size(size[0], size[1])
# ------------------------------------------------------------ position ---
@property
def position(self):
""" The position of canvas/window relative to screen """
return self._backend._vispy_get_position()
@position.setter
def position(self, position):
assert len(position) == 2
return self._backend._vispy_set_position(position[0], position[1])
# --------------------------------------------------------------- title ---
@property
def title(self):
""" The title of canvas/window """
return self._title
@title.setter
def title(self, title):
self._title = title
self._backend._vispy_set_title(title)
# ----------------------------------------------------------------- fps ---
@property
def fps(self):
""" The fps of canvas/window, measured as the rate that events.draw
is emitted. """
return self._fps
def swap_buffers(self, event=None):
""" Swap GL buffers such that the offscreen buffer becomes visible.
"""
self._backend._vispy_swap_buffers()
def show(self, visible=True):
""" Show (or hide) the canvas """
return self._backend._vispy_set_visible(visible)
def update(self):
""" Inform the backend that the Canvas needs to be redrawn """
if self._backend is not None:
return self._backend._vispy_update()
else:
return
def close(self):
""" Close the canvas
Note: This will usually destroy the GL context. For Qt, the context
(and widget) will be destroyed only if the widget is top-level.
To avoid having the widget destroyed (more like standard Qt
behavior), consider making the widget a sub-widget.
"""
if self._backend is not None:
self._backend._vispy_close()
self._backend._vispy_canvas = None
def _update_fps(self, event):
""" Updates the fps after every window and resets the basetime
and frame count to current time and 0, respectively
"""
self._frame_count += 1
diff = time() - self._basetime
if (diff > self._fps_window):
self._fps = self._frame_count/diff
self._basetime = time()
self._frame_count = 0
self._fps_callback(self.fps)
def measure_fps(self, window=1, callback=print):
"""Measure the current FPS
        Sets the averaging window, connects the draw event to
        _update_fps, and sets the callback function.
If no callback is passed, measurement stops.
Parameters
----------
window : int
            The time span (in seconds) over which the FPS is averaged.
callback : function
The function to call with the FPS. Default is ``print``.
"""
# Connect update_fps function to draw
self.events.draw.disconnect(self._update_fps)
if callback:
self._fps_window = window
self.events.draw.connect(self._update_fps)
self._fps_callback = callback
else:
self._fps_callback = None
# ---------------------------------------------------------------- misc ---
def __repr__(self):
return ('<Vispy canvas (%s backend) at %s>'
% (self.app.backend_name, hex(id(self))))
def __enter__(self):
self.show()
self._backend._vispy_warmup()
return self
def __exit__(self, type, value, traceback):
self.swap_buffers() # ensure all GL calls are complete
self.close()
# def mouse_event(self, event):
#"""Called when a mouse input event has occurred (the mouse has moved,
# a button was pressed/released, or the wheel has moved)."""
# def key_event(self, event):
#"""Called when a keyboard event has occurred (a key was pressed or
# released while the canvas has focus)."""
# def touch_event(self, event):
#"""Called when the user touches the screen over a Canvas.
# Event properties:
# event.touches
# [ (x,y,pressure), ... ]
#"""
# def stylus_event(self, event):
#"""Called when a stylus has been used to interact with the Canvas.
# Event properties:
# event.device
# event.pos (x,y)
# event.pressure
# event.angle
#"""
# def initialize_event(self, event):
#"""Called when the OpenGL context is initialy made available for this
# Canvas."""
# def resize_event(self, event):
#"""Called when the Canvas is resized.
# Event properties:
# event.size (w,h)
#"""
# def draw_event(self, event):
#"""Called when all or part of the Canvas needs to be redrawn.
# Event properties:
# event.region (x,y,w,h) region of Canvas requiring redraw
#"""
# Event subclasses specific to the Canvas
class MouseEvent(Event):
"""Mouse event class
Note that each event object has an attribute for each of the input
arguments listed below.
Parameters
----------
type : str
String indicating the event type (e.g. mouse_press, key_release)
pos : (int, int)
The position of the mouse (in screen coordinates).
button : int
The button that generated this event (can be None).
Left=1, right=2, middle=3.
buttons : [int, ...]
The list of buttons depressed during this event.
modifiers : tuple of Key instances
Tuple that specifies which modifier keys were pressed down at the
time of the event (shift, control, alt, meta).
delta : (float, float)
The amount of scrolling in horizontal and vertical direction. One
"tick" corresponds to a delta of 1.0.
press_event : MouseEvent
The press event that was generated at the start of the current drag,
if any.
last_event : MouseEvent
The MouseEvent immediately preceding the current event. During drag
operations, all generated events retain their last_event properties,
allowing the entire drag to be reconstructed.
native : object (optional)
The native GUI event object
**kwds : keyword arguments
All extra keyword arguments become attributes of the event object.
"""
def __init__(self, type, pos=None, button=None, buttons=None,
modifiers=None, delta=None, last_event=None, press_event=None,
**kwds):
Event.__init__(self, type, **kwds)
self._pos = (0, 0) if (pos is None) else (pos[0], pos[1])
self._button = int(button) if (button is not None) else 0
self._buttons = [] if (buttons is None) else buttons
self._modifiers = tuple(modifiers or ())
self._delta = (0.0, 0.0) if (delta is None) else (delta[0], delta[1])
self._last_event = last_event
self._press_event = press_event
@property
def pos(self):
return self._pos
@property
def button(self):
return self._button
@property
def buttons(self):
return self._buttons
@property
def modifiers(self):
return self._modifiers
@property
def delta(self):
return self._delta
@property
def press_event(self):
return self._press_event
@property
def last_event(self):
return self._last_event
def _forget_last_event(self):
# Needed to break otherwise endless last-event chains
self._last_event = None
@property
def is_dragging(self):
""" Indicates whether this event is part of a mouse drag operation.
"""
return self.press_event is not None
def drag_events(self):
""" Return a list of all mouse events in the current drag operation.
Returns None if there is no current drag operation.
"""
if not self.is_dragging:
return None
event = self
events = []
while True:
# mouse_press events can only be the start of a trail
if event is None or event.type == 'mouse_press':
break
events.append(event)
event = event.last_event
return events[::-1]
def trail(self):
""" Return an (N, 2) array of mouse coordinates for every event in the
current mouse drag operation.
Returns None if there is no current drag operation.
"""
events = self.drag_events()
if events is None:
return None
trail = np.empty((len(events), 2), dtype=int)
for i, ev in enumerate(events):
trail[i] = ev.pos
return trail
class KeyEvent(Event):
"""Key event class
Note that each event object has an attribute for each of the input
arguments listed below.
Parameters
----------
type : str
String indicating the event type (e.g. mouse_press, key_release)
key : vispy.keys.Key instance
The Key object for this event. Can be compared to string names.
text : str
The text representation of the key (can be an empty string).
modifiers : tuple of Key instances
Tuple that specifies which modifier keys were pressed down at the
time of the event (shift, control, alt, meta).
native : object (optional)
The native GUI event object
**kwds : keyword arguments
All extra keyword arguments become attributes of the event object.
"""
def __init__(self, type, key=None, text='', modifiers=None, **kwds):
Event.__init__(self, type, **kwds)
self._key = key
self._text = text
self._modifiers = tuple(modifiers or ())
@property
def key(self):
return self._key
@property
def text(self):
return self._text
@property
def modifiers(self):
return self._modifiers
class ResizeEvent(Event):
""" Resize event class
Note that each event object has an attribute for each of the input
arguments listed below.
Parameters
----------
type : str
String indicating the event type (e.g. mouse_press, key_release)
size : (int, int)
The new size of the Canvas.
native : object (optional)
The native GUI event object
**kwds : extra keyword arguments
All extra keyword arguments become attributes of the event object.
"""
def __init__(self, type, size=None, **kwds):
Event.__init__(self, type, **kwds)
self._size = tuple(size)
@property
def size(self):
return self._size
class DrawEvent(Event):
""" Draw event class
This type of event is sent to Canvas.events.draw when a redraw
is required.
Note that each event object has an attribute for each of the input
arguments listed below.
Parameters
----------
type : str
String indicating the event type (e.g. mouse_press, key_release)
region : (int, int, int, int) or None
The region of the canvas which needs to be redrawn (x, y, w, h).
If None, the entire canvas must be redrawn.
native : object (optional)
The native GUI event object
**kwds : extra keyword arguments
All extra keyword arguments become attributes of the event object.
"""
def __init__(self, type, region=None, **kwds):
Event.__init__(self, type, **kwds)
self._region = region
@property
def region(self):
return self._region
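# --- Editor's usage sketch (not part of the original file) ---
# Hedged minimal example of driving a Canvas from application code; it assumes
# a working vispy backend is installed and follows the connect() decorator
# convention (on_<event name>) documented above.
if __name__ == '__main__':
    from vispy import app
    canvas = app.Canvas(title='demo', size=(400, 300), close_keys='escape')
    @canvas.connect
    def on_draw(event):
        pass  # issue draw calls here
    canvas.show()
    app.run()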
| 33.198675
| 79
| 0.592509
|
a12a4c4a5e8878baa8e02622ebde5d5722f131db
| 1,134
|
py
|
Python
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/tests/storage/test_impl_log.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 2
|
2018-11-07T20:52:53.000Z
|
2019-10-20T15:57:01.000Z
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/tests/storage/test_impl_log.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 3
|
2021-12-14T20:57:54.000Z
|
2022-01-21T23:50:36.000Z
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/tests/storage/test_impl_log.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 2
|
2018-11-16T04:20:06.000Z
|
2019-03-28T23:49:13.000Z
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_log.py
"""
from oslotest import base
from ceilometer.storage import impl_log
class ConnectionTest(base.BaseTestCase):
def test_get_connection(self):
conn = impl_log.Connection(None)
conn.record_metering_data({'counter_name': 'test',
'resource_id': __name__,
'counter_volume': 1,
})
| 36.580645
| 76
| 0.662257
|
386381eebbd8b236919f3d51ecbc5193c0ebc65e
| 572
|
py
|
Python
|
terrascript/provider/invidian/stdlib.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/provider/invidian/stdlib.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/provider/invidian/stdlib.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/provider/invidian/stdlib.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:27:59 UTC)
import terrascript
class stdlib(terrascript.Provider):
"""Terraform provider with collection of useful data sources"""
__description__ = "Terraform provider with collection of useful data sources"
__namespace__ = "invidian"
__name__ = "stdlib"
__source__ = "https://github.com/invidian/terraform-provider-stdlib"
__version__ = "0.1.1"
__published__ = "2020-08-18T20:52:51Z"
__tier__ = "community"
__all__ = ["stdlib"]
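# --- Editor's usage sketch (not part of the original file) ---
# Hedged example of registering this provider in a python-terrascript
# configuration container; rendering the configuration to Terraform JSON is
# left to the caller.
if __name__ == "__main__":
    config = terrascript.Terrascript()
    config += stdlib()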
| 28.6
| 81
| 0.729021
|
3b546852b0c60a8f241edbab80c55123aa487877
| 10,597
|
py
|
Python
|
airbyte-integrations/connectors/source-google-sheets/unit_tests/test_helpers.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-google-sheets/unit_tests/test_helpers.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-google-sheets/unit_tests/test_helpers.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
from unittest.mock import Mock, patch
from airbyte_protocol import AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, SyncMode
from airbyte_protocol.models.airbyte_protocol import DestinationSyncMode
from base_python import AirbyteLogger
from google_sheets_source.client import GoogleSheetsClient
from google_sheets_source.helpers import Helpers
from google_sheets_source.models import CellData, GridData, RowData, Sheet, SheetProperties, Spreadsheet
logger = AirbyteLogger()
class TestHelpers(unittest.TestCase):
def test_headers_to_airbyte_stream(self):
sheet_name = "sheet1"
header_values = ["h1", "h2", "h3"]
expected_stream = AirbyteStream(
name=sheet_name,
json_schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
# For simplicity, the type of every cell is a string
"properties": {header: {"type": "string"} for header in header_values},
},
)
actual_stream = Helpers.headers_to_airbyte_stream(logger, sheet_name, header_values)
self.assertEqual(expected_stream, actual_stream)
    def test_duplicate_headers_retrieved(self):
header_values = ["h1", "h1", "h3"]
expected_valid_header_values = ["h3"]
expected_duplicate_header_values = ["h1"]
actual_header_values, actual_duplicate_header_values = Helpers.get_valid_headers_and_duplicates(header_values)
self.assertEqual(expected_duplicate_header_values, actual_duplicate_header_values)
self.assertEqual(expected_valid_header_values, actual_header_values)
def test_duplicate_headers_to_ab_stream_ignores_duplicates(self):
sheet_name = "sheet1"
header_values = ["h1", "h1", "h3"]
        # h1 is ignored because it is a duplicate
expected_stream_header_values = ["h3"]
expected_stream = AirbyteStream(
name=sheet_name,
json_schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
# For simplicity, the type of every cell is a string
"properties": {header: {"type": "string"} for header in expected_stream_header_values},
},
)
actual_stream = Helpers.headers_to_airbyte_stream(logger, sheet_name, header_values)
self.assertEqual(expected_stream, actual_stream)
def test_headers_to_airbyte_stream_blank_values_terminate_row(self):
sheet_name = "sheet1"
header_values = ["h1", "", "h3"]
expected_stream = AirbyteStream(
name=sheet_name,
json_schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
# For simplicity, the type of every cell is a string
"properties": {"h1": {"type": "string"}},
},
)
actual_stream = Helpers.headers_to_airbyte_stream(logger, sheet_name, header_values)
self.assertEqual(expected_stream, actual_stream)
def test_is_row_empty_with_empty_row(self):
values = [" ", "", " "]
self.assertTrue(Helpers.is_row_empty(values))
def test_is_row_empty_with_full_row(self):
values = [" ", "", " ", "somevaluehere"]
self.assertFalse(Helpers.is_row_empty(values))
def test_row_contains_relevant_data(self):
values = ["c1", "c2", "c3"]
relevant_indices = [2]
self.assertTrue(Helpers.row_contains_relevant_data(values, relevant_indices))
def test_row_contains_relevant_data_is_false(self):
values = ["", "", "c3"]
relevant_indices = [0, 1]
self.assertFalse(Helpers.row_contains_relevant_data(values, relevant_indices))
def test_parse_sheet_and_column_names_from_catalog(self):
sheet1 = "soccer_team"
sheet1_columns = frozenset(["arsenal", "chelsea", "manutd", "liverpool"])
sheet1_schema = {"properties": {c: {"type": "string"} for c in sheet1_columns}}
sheet2 = "basketball_teams"
sheet2_columns = frozenset(["gsw", "lakers"])
sheet2_schema = {"properties": {c: {"type": "string"} for c in sheet2_columns}}
catalog = ConfiguredAirbyteCatalog(
streams=[
ConfiguredAirbyteStream(
stream=AirbyteStream(name=sheet1, json_schema=sheet1_schema),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.overwrite,
),
ConfiguredAirbyteStream(
stream=AirbyteStream(name=sheet2, json_schema=sheet2_schema),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.overwrite,
),
]
)
actual = Helpers.parse_sheet_and_column_names_from_catalog(catalog)
expected = {sheet1: sheet1_columns, sheet2: sheet2_columns}
self.assertEqual(actual, expected)
def test_row_data_to_record_message(self):
sheet = "my_sheet"
cell_values = ["v1", "v2", "v3", "v4"]
column_index_to_name = {0: "c1", 3: "c4"}
actual = Helpers.row_data_to_record_message(sheet, cell_values, column_index_to_name)
expected = AirbyteRecordMessage(stream=sheet, data={"c1": "v1", "c4": "v4"}, emitted_at=1)
self.assertEqual(expected.stream, actual.stream)
self.assertEqual(expected.data, actual.data)
def test_get_formatted_row_values(self):
expected = [str(i) for i in range(10)]
row_data = RowData(values=[CellData(formattedValue=x) for x in expected])
actual = Helpers.get_formatted_row_values(row_data)
self.assertEqual(expected, actual)
def test_get_first_row(self):
spreadsheet_id = "123"
sheet = "s1"
expected_first_row = ["1", "2", "3", "4"]
fake_response = Spreadsheet(
spreadsheetId=spreadsheet_id,
sheets=[Sheet(data=[GridData(rowData=[RowData(values=[CellData(formattedValue=v) for v in expected_first_row])])])],
)
client = Mock()
client.get.return_value.execute.return_value = fake_response
with patch.object(GoogleSheetsClient, "__init__", lambda s, credentials, scopes: None):
sheet_client = GoogleSheetsClient({"fake": "credentials"}, ["auth_scopes"])
sheet_client.client = client
actual = Helpers.get_first_row(sheet_client, spreadsheet_id, sheet)
self.assertEqual(expected_first_row, actual)
client.get.assert_called_with(spreadsheetId=spreadsheet_id, includeGridData=True, ranges=f"{sheet}!1:1")
def test_get_sheets_in_spreadsheet(self):
spreadsheet_id = "id1"
expected_sheets = ["s1", "s2"]
client = Mock()
client.get.return_value.execute.return_value = Spreadsheet(
spreadsheetId=spreadsheet_id, sheets=[Sheet(properties=SheetProperties(title=t)) for t in expected_sheets]
)
with patch.object(GoogleSheetsClient, "__init__", lambda s, credentials, scopes: None):
sheet_client = GoogleSheetsClient({"fake": "credentials"}, ["auth_scopes"])
sheet_client.client = client
actual_sheets = Helpers.get_sheets_in_spreadsheet(sheet_client, spreadsheet_id)
self.assertEqual(expected_sheets, actual_sheets)
client.get.assert_called_with(spreadsheetId=spreadsheet_id, includeGridData=False)
def test_get_available_sheets_to_column_index_to_name(self):
# To mock different return values depending on input args, we use side effects with this method
spreadsheet_id = "123"
sheet1 = "s1"
sheet1_first_row = ["1", "2", "3", "4"]
        # Since pytest and unittest don't give a clean way to mock responses for exact input arguments,
        # we use .side_effect with a function that dispatches on spreadsheetId, includeGridData and ranges.
def mock_client_call(spreadsheetId, includeGridData, ranges=None):
if spreadsheetId != spreadsheet_id:
return None
# the spreadsheet only contains sheet1
elif not includeGridData and ranges is None:
mocked_return = Spreadsheet(spreadsheetId=spreadsheet_id, sheets=[Sheet(properties=SheetProperties(title=sheet1))])
elif includeGridData and ranges == f"{sheet1}!1:1":
mocked_return = Spreadsheet(
spreadsheetId=spreadsheet_id,
sheets=[Sheet(data=[GridData(rowData=[RowData(values=[CellData(formattedValue=v) for v in sheet1_first_row])])])],
)
m = Mock()
m.execute.return_value = mocked_return
return m
client = Mock()
client.get.side_effect = mock_client_call
with patch.object(GoogleSheetsClient, "__init__", lambda s, credentials, scopes: None):
sheet_client = GoogleSheetsClient({"fake": "credentials"}, ["auth_scopes"])
sheet_client.client = client
actual = Helpers.get_available_sheets_to_column_index_to_name(
sheet_client, spreadsheet_id, {sheet1: frozenset(sheet1_first_row), "doesnotexist": frozenset(["1", "2"])}
)
expected = {sheet1: {0: "1", 1: "2", 2: "3", 3: "4"}}
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
| 43.430328
| 134
| 0.66868
|
d1de6a08c19e8651d45bf0dd7c7ba9d604a53b84
| 3,165
|
py
|
Python
|
scripts/evaluate_on_all.py
|
ikergarcia1996/word-embeddings-benchmarks
|
da035aacf799d268a707f8e8f98a69f1f4618d73
|
[
"MIT"
] | null | null | null |
scripts/evaluate_on_all.py
|
ikergarcia1996/word-embeddings-benchmarks
|
da035aacf799d268a707f8e8f98a69f1f4618d73
|
[
"MIT"
] | null | null | null |
scripts/evaluate_on_all.py
|
ikergarcia1996/word-embeddings-benchmarks
|
da035aacf799d268a707f8e8f98a69f1f4618d73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script evaluates an embedding against all available fast-running
benchmarks in the repository and saves the results as a single-row CSV table.
Usage: ./evaluate_on_all -f <path to file> -o <path to output file>
NOTE:
* script doesn't evaluate on WordRep (nor its subset) as it is non-standard
for now and long running (unless some nearest neighbor approximation is used).
* script uses CosAdd for calculating the analogy answer.
* script does not report results per category (for instance semantic/syntactic) in analogy benchmarks.
It is easy to change this by passing the category parameter to the evaluate_analogy function (see help).
"""
from optparse import OptionParser
import logging
import os
from web.embeddings import fetch_GloVe, load_embedding
from web.datasets.utils import _get_dataset_dir
from web.evaluate import evaluate_on_all
# Configure logging
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%I:%M:%S')
logger = logging.getLogger(__name__)
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="Path to the file with embedding. If relative will load from data directory.",
default=None)
parser.add_option("-p", "--format", dest="format",
help="Format of the embedding, possible values are: word2vec, word2vec_bin, dict and glove.",
default=None)
parser.add_option("-o", "--output", dest="output",
help="Path where to save results.",
default=None)
parser.add_option("-c", "--clean_words", dest="clean_words",
help="Clean_words argument passed to load_embedding function. If set to True will remove"
"most of the non-alphanumeric characters, which should speed up evaluation.",
default=False)
if __name__ == "__main__":
(options, args) = parser.parse_args()
# Load embeddings
fname = options.filename
if not fname:
w = fetch_GloVe(corpus="wiki-6B", dim=300)
else:
if not os.path.isabs(fname):
fname = os.path.join(_get_dataset_dir(), fname)
format = options.format
if not format:
_, ext = os.path.splitext(fname)
if ext == ".bin":
format = "word2vec_bin"
elif ext == ".txt":
format = "word2vec"
elif ext == ".pkl":
format = "dict"
        assert format in ['word2vec_bin', 'word2vec', 'glove', 'dict'], "Unrecognized format"
load_kwargs = {}
if format == "glove":
load_kwargs['vocab_size'] = sum(1 for line in open(fname))
load_kwargs['dim'] = len(next(open(fname)).split()) - 1
w = load_embedding(fname, format=format, normalize=True, lower=False, clean_words=options.clean_words,
load_kwargs=load_kwargs)
out_fname = options.output if options.output else "results.csv"
results = evaluate_on_all(w)
logger.info("Saving results...")
print(results)
results.to_csv(out_fname)
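# A hedged example invocation (hypothetical paths), mirroring the Usage line above:
#     ./evaluate_on_all.py -f my_vectors.txt -p word2vec -o results.csv
# The same flow, sketched programmatically with the imports used in this script:
#     w = load_embedding("my_vectors.txt", format="word2vec", normalize=True,
#                        lower=False, clean_words=False)
#     evaluate_on_all(w).to_csv("results.csv")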
| 35.965909
| 111
| 0.64455
|
2067bc826451757bd1a58930dce910f188db5412
| 671
|
py
|
Python
|
textkit/tokenize/words.py
|
learntextvis/textkit
|
6c482ea7d378022e10ee1002f9d9db7925856294
|
[
"MIT"
] | 29
|
2016-01-13T00:55:53.000Z
|
2022-02-03T06:38:10.000Z
|
textkit/tokenize/words.py
|
irealva/textkit
|
858053a0e4c1f2cc4160873a4eb429c518f48ca2
|
[
"MIT"
] | 46
|
2016-01-18T19:17:50.000Z
|
2019-12-20T04:55:04.000Z
|
textkit/tokenize/words.py
|
irealva/textkit
|
858053a0e4c1f2cc4160873a4eb429c518f48ca2
|
[
"MIT"
] | 9
|
2016-04-24T20:01:37.000Z
|
2019-01-15T08:54:11.000Z
|
import click
import nltk
from textkit.utils import output
@click.command()
@click.argument('text', type=click.Path(exists=True), nargs=-1)
def text2words(text):
'''Tokenize text into word tokens.
Punctuation is considered as a separate token.'''
content = '\n'.join([open(f).read() for f in text])
tokens = []
try:
tokens = nltk.word_tokenize(content)
except LookupError as err:
click.echo(message="Error with tokenization", nl=True)
click.echo(message="Have you run \"textkit download\"?", nl=True)
click.echo(message="\nOriginal Error:", nl=True)
click.echo(err)
[output(token) for token in tokens]
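# A rough illustration (assuming the NLTK tokenizer data has been fetched via
# "textkit download"): word_tokenize treats punctuation as separate tokens, e.g.
#     nltk.word_tokenize("Don't stop.")  ->  ['Do', "n't", 'stop', '.']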
| 31.952381
| 73
| 0.66468
|
30a27d90c416beedfdef7467bbbe5ecc57082231
| 684
|
py
|
Python
|
blocks/metrics.py
|
Aaron-Jin-Xu/probabilistic-semantic-image-inpainting
|
8ce630eaf7e8f9ef5fc5ad19d5474d050d71807d
|
[
"MIT"
] | 1
|
2020-04-19T22:48:25.000Z
|
2020-04-19T22:48:25.000Z
|
blocks/metrics.py
|
Aaron-Jin-Xu/probabilistic-semantic-image-inpainting
|
8ce630eaf7e8f9ef5fc5ad19d5474d050d71807d
|
[
"MIT"
] | null | null | null |
blocks/metrics.py
|
Aaron-Jin-Xu/probabilistic-semantic-image-inpainting
|
8ce630eaf7e8f9ef5fc5ad19d5474d050d71807d
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
# def nats_per_dim(nll, dim):
# -nll / dim + np.log(128)
#
# def nats_to_bits(nats):
# return nats / np.log(2)
#
# def bits_per_dim(nll, dim):
# return nats_to_bits(nats_per_dim(nll, dim))
#
# def nats_per_dim_tf(nll, dim):
# return -nll / dim + tf.math.log(128.)
#
# def nats_to_bits_tf(nats):
# return nats / tf.math.log(2.)
#
# def bits_per_dim_tf(nll, dim):
# return nats_to_bits_tf(nats_per_dim_tf(nll, dim))
def bits_per_dim_tf(nll, dim, unit=1.0):
# https://www.reddit.com/r/MachineLearning/comments/56m5o2/discussion_calculation_of_bitsdims/
return (nll / dim + tf.math.log(unit)) / tf.math.log(2.)
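# Hedged worked example (assuming unit=1.0): for a 32x32x3 image, dim = 3072, so a
# negative log-likelihood of 1000 nats gives (1000 / 3072) / ln(2) ~= 0.47 bits/dim.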
| 27.36
| 98
| 0.678363
|
ee7044dbb8c3a91a8de2801ac338b16e46b90274
| 193
|
py
|
Python
|
source_code/pypeflow/core/__init__.py
|
TomLXXVI/pypeflow
|
49e42621180ec3125afa238d3ca56ae9f3a7662a
|
[
"MIT"
] | 4
|
2020-05-26T01:11:08.000Z
|
2021-09-15T20:24:31.000Z
|
source_code/pypeflow/core/__init__.py
|
robertspark/pypeflow
|
49e42621180ec3125afa238d3ca56ae9f3a7662a
|
[
"MIT"
] | null | null | null |
source_code/pypeflow/core/__init__.py
|
robertspark/pypeflow
|
49e42621180ec3125afa238d3ca56ae9f3a7662a
|
[
"MIT"
] | 1
|
2022-01-19T20:26:11.000Z
|
2022-01-19T20:26:11.000Z
|
"""
# Core modules around which pypeflow is built
"""
from pypeflow.core.pipe import Pipe
from pypeflow.core.fitting import Fitting
from pypeflow.core.valves import BalancingValve, ControlValve
| 24.125
| 61
| 0.803109
|
8d828599024b8a15235d4225ef224e9f9144a56e
| 2,473
|
py
|
Python
|
scripts/pineapple/pydwarf.useablemats.py
|
Charcoal-Apple/PyDwarf
|
6f2dd294509c83802e06b9ca5e084f89c9fe4bda
|
[
"Zlib"
] | 49
|
2015-05-21T19:40:41.000Z
|
2021-09-30T03:04:32.000Z
|
scripts/pineapple/pydwarf.useablemats.py
|
Charcoal-Apple/PyDwarf
|
6f2dd294509c83802e06b9ca5e084f89c9fe4bda
|
[
"Zlib"
] | 6
|
2015-05-30T01:50:01.000Z
|
2021-09-22T14:22:41.000Z
|
scripts/pineapple/pydwarf.useablemats.py
|
Charcoal-Apple/PyDwarf
|
6f2dd294509c83802e06b9ca5e084f89c9fe4bda
|
[
"Zlib"
] | 13
|
2015-05-23T16:26:31.000Z
|
2021-09-05T18:46:36.000Z
|
import pydwarf
default_options = {
'scales': ('SCALE_TEMPLATE', '[LEATHER][ITEMS_LEATHER][MATERIAL_REACTION_PRODUCT:TAN_MAT:LOCAL_CREATURE_MAT:SCALE]'),
'feathers': ('FEATHER_TEMPLATE', '[PEARL][ITEMS_SOFT]'),
'chitin': ('CHITIN_TEMPLATE', '[SHELL][ITEMS_HARD][ITEMS_SCALED]')
}
@pydwarf.urist(
name = 'pineapple.useablemats',
title = 'Craft With Scales, Feathers, and Chitin',
version = '1.0.0',
author = 'Sophie Kirschner',
description = '''Causes scales, feathers, and chitin to become useful for crafting.
Inspired by/stolen from Rubble's Usable Scale/Feathers/Chitin fixes.''',
arguments = {
'options': '''A dictionary associating option names with tuples where the first
element is the name of a MATERIAL_TEMPLATE and the second is tokens to be
added to that template. Option names, when passed as a keyword argument and
set to False, will cause that option to be disabled.''',
'scales': '''Recognized when using the default options dict. If set to True,
scales will be made to act more like leather for crafting purposes.''',
'feathers': '''Recognized when using the default options dict. If set to True,
feathers will be useable for making soft items, such as clothing.''',
'chitin': '''Recognized when using the default options dict. If set to True,
chitin will be made to act more like shells for crafting purposes.'''
},
compatibility = (pydwarf.df_0_3x, pydwarf.df_0_40)
)
def useable(df, options=default_options, **kwargs):
failures = 0
# Handle each option, simply adding some new tokens to each material template given
for optionname, option in options.iteritems():
pydwarf.log.debug('Handling material template option %s.' % optionname)
if optionname not in kwargs or kwargs[optionname]:
templatename, templatetokens = option
template = df.getobj(type='MATERIAL_TEMPLATE', exact_id=templatename)
if template:
template.addprop(templatetokens)
pydwarf.log.info('Added tokens to %s.' % templatename)
else:
pydwarf.log.error('Couldn\'t find %s.' % templatename)
failures += 1
# All done!
if failures == 0:
return pydwarf.success()
else:
return pydwarf.failure('Failed to add tokens to %d material templates.' % failures)
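# A hedged usage sketch (hypothetical invocations, following the argument docs above):
# disable one of the default options via a keyword argument,
#     useable(df, chitin=False)
# or supply a custom options dict of (MATERIAL_TEMPLATE name, tokens to add) tuples,
#     useable(df, options={'scales': ('SCALE_TEMPLATE', '[LEATHER][ITEMS_LEATHER]')})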
| 44.963636
| 121
| 0.656692
|
6ae928358b8beef0b0312cdcce2f1a11d78075b5
| 171
|
py
|
Python
|
yatube_api/yatube_api/wsgi.py
|
ilyarogozin/api_final_yatube
|
bb5e15eb66ab109b2fb529c4b47648d7a51f29ee
|
[
"MIT"
] | null | null | null |
yatube_api/yatube_api/wsgi.py
|
ilyarogozin/api_final_yatube
|
bb5e15eb66ab109b2fb529c4b47648d7a51f29ee
|
[
"MIT"
] | null | null | null |
yatube_api/yatube_api/wsgi.py
|
ilyarogozin/api_final_yatube
|
bb5e15eb66ab109b2fb529c4b47648d7a51f29ee
|
[
"MIT"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yatube_api.settings')
application = get_wsgi_application()
| 21.375
| 70
| 0.830409
|
4e58476d0e7ccbb9157fb627d38f8541ffd2a549
| 2,159
|
py
|
Python
|
neo4j_api/test_model_interface.py
|
jasonjimnz/ORT
|
2006e5cdb11850bbe2e5b955cfaaeb08b8ef864d
|
[
"MIT"
] | null | null | null |
neo4j_api/test_model_interface.py
|
jasonjimnz/ORT
|
2006e5cdb11850bbe2e5b955cfaaeb08b8ef864d
|
[
"MIT"
] | null | null | null |
neo4j_api/test_model_interface.py
|
jasonjimnz/ORT
|
2006e5cdb11850bbe2e5b955cfaaeb08b8ef864d
|
[
"MIT"
] | null | null | null |
import random
import requests
from faker import Faker
fake = Faker('es_ES')
class PersonGenerator:
faker_instance = fake
minube_api_key = None
cities_list = []
def __init__(self, minube_api_key, cities=None):
self.minube_api_key = minube_api_key
if cities:
self.cities_list = cities
print("Initialized")
def get_person(cls):
return {
"name": cls.faker_instance.name(),
"last_name": cls.faker_instance.last_name()
}
def get_city(self):
if self.cities_list:
return {
"city": self.cities_list[random.randint(0, len(self.cities_list)-1)]
}
else:
return {
"city": self.faker_instance.city(),
}
def get_single_travel(cls):
return {
"city": cls.faker_instance.city(),
"date": cls.faker_instance.date_time_this_decade().strftime('%Y-%m-%d')
}
    def get_person_travels(cls):
person_travels = {
"person": cls.get_person(),
"home": cls.get_city(),
"travels": [cls.get_single_travel() for x in range(0,10)]
}
return person_travels
def get_activity(cls):
activities = [
"Goes to a museum",
"Goes to a restaurant",
"Goes to a church",
"Goes to a football match",
"Goes to a basketball match",
"Goes to a meeting",
"Explore new places",
"Find new job",
"Buy a property",
"Rent a property",
"Buy a car",
"Visit someone",
"Political event",
"Hobby event",
"Visit a doctor",
"Other"
]
return activities[random.randint(0, len(activities)-1)]
@classmethod
def get_purpose(cls):
purposes = [
"Business",
"Holidays",
"Personal",
"Medical",
"Honeymoon",
"Event",
"Other"
]
return purposes[random.randint(0, len(purposes)-1)]
| 23.725275
| 84
| 0.509032
|
7617fe67eaa90a5537d5abbd75f936a46f4d72bf
| 1,908
|
py
|
Python
|
infomemes/to_from_file.py
|
luiztauffer/infomemes
|
7a54546aa688b703e6e020e53eeabae9dc922619
|
[
"MIT"
] | null | null | null |
infomemes/to_from_file.py
|
luiztauffer/infomemes
|
7a54546aa688b703e6e020e53eeabae9dc922619
|
[
"MIT"
] | null | null | null |
infomemes/to_from_file.py
|
luiztauffer/infomemes
|
7a54546aa688b703e6e020e53eeabae9dc922619
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import json
def save_stepwise_data(sim, fname):
# Simulation metadata
metadata = {
'duration': sim.current_step,
}
metadata.update(sim.config)
file_dict = {}
file_dict['metadata'] = metadata
file_dict['data'] = sim.stepwise_values
with open(fname, 'w') as f:
json.dump(file_dict, f)
def save_light_data(sim, fname):
activated = []
deactivated = []
position_x = []
position_y = []
cov_x = []
cov_y = []
cov_xy = []
meme_production_rate = []
for m in sim.all_media:
if m.active:
deactivated.append(sim.current_step)
else:
deactivated.append(m.deactivated)
activated.append(m.activated)
position_x.append(m.x)
position_y.append(m.y)
cov_x.append(m.cov[0, 0])
cov_y.append(m.cov[1, 1])
cov_xy.append(m.cov[0, 1])
meme_production_rate.append(m.meme_production_rate)
# Arrange data in a DataFrame
data = np.array([activated, deactivated, position_x, position_y, cov_x, cov_y, cov_xy, meme_production_rate]).T
colnames = ['activated', 'deactivated', 'position_x', 'position_y', 'cov_x', 'cov_y', 'cov_xy', 'meme_production_rate']
df = pd.DataFrame(data=data, columns=colnames)
# Simulation metadata
metadata = {
'duration': sim.current_step,
'individuals_xy': [(i.x, i.y) for i in sim.all_individuals],
}
metadata.update(sim.config)
file_dict = {}
file_dict['metadata'] = metadata
file_dict['data'] = df.to_dict()
with open(fname, 'w') as f:
json.dump(file_dict, f)
def load_light_data(fname):
with open(fname, 'r') as f:
file_dict = json.load(f)
df_as_dict = file_dict.pop('data', {})
df = pd.DataFrame().from_dict(df_as_dict)
metadata = file_dict['metadata']
return metadata, df
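# A minimal usage sketch (hypothetical filename): round-tripping a saved run,
#     metadata, df = load_light_data("simulation_results.json")
#     print(metadata['duration'], len(df))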
| 26.136986
| 123
| 0.623166
|
36fc6e8ad2ae01f31c61a039701e70915a3df20e
| 673
|
py
|
Python
|
mercury_ml/tensorflow/model_loading.py
|
mercury-ml-team/mercury-ml
|
8d27816490f0be46f871e889e4635e9223b7044c
|
[
"MIT"
] | 43
|
2019-02-01T15:22:09.000Z
|
2020-02-21T12:51:42.000Z
|
mercury_ml/tensorflow/model_loading.py
|
mercury-ml-team/mercury-ml
|
8d27816490f0be46f871e889e4635e9223b7044c
|
[
"MIT"
] | 17
|
2019-02-15T12:52:18.000Z
|
2019-05-09T15:42:51.000Z
|
mercury_ml/tensorflow/model_loading.py
|
mercury-ml-team/mercury-ml
|
8d27816490f0be46f871e889e4635e9223b7044c
|
[
"MIT"
] | 12
|
2019-02-02T16:48:10.000Z
|
2019-12-16T15:40:15.000Z
|
def load_hdf5_model(local_dir, filename, extension=".h5", custom_objects=None):
"""
Loads a Keras model that was saved in ".h5" format
:param string local_dir: Local directory where the model is saved
:param string filename: Filename with which the model is saved
:param string extension: Extension to the filename with which the model is saved
:param dict custom_objects: Any custom objects (such as custom loss functions) that were included when the model was saved
:return: A Keras model
"""
from tensorflow.keras.models import load_model
return load_model(local_dir + "/" + filename + extension, custom_objects=custom_objects)
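# A minimal usage sketch (hypothetical directory, filename and loss function): this
# loads "./models/my_model.h5"; custom losses would be passed via custom_objects.
#     model = load_hdf5_model("./models", "my_model",
#                             custom_objects={"my_loss": my_loss})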
| 51.769231
| 127
| 0.744428
|
fd9b66d8186461a6e4cbe8f09a7935d19155cf82
| 462
|
py
|
Python
|
OpenEyetap_Applications/Nanoleaf/nanotest.py
|
JustinLokHinWu/OpenEyeTap
|
cb0b23917663668fb8a95b01417c900d7001f594
|
[
"MIT"
] | null | null | null |
OpenEyetap_Applications/Nanoleaf/nanotest.py
|
JustinLokHinWu/OpenEyeTap
|
cb0b23917663668fb8a95b01417c900d7001f594
|
[
"MIT"
] | null | null | null |
OpenEyetap_Applications/Nanoleaf/nanotest.py
|
JustinLokHinWu/OpenEyeTap
|
cb0b23917663668fb8a95b01417c900d7001f594
|
[
"MIT"
] | null | null | null |
import requests
import json
auth = "akYZIxhpNARKoOr18kDAOy3ihz3JhS8F"
path = "10.0.0.204:16021"
def get_path():
return "http://" + path + "/api/v1/" + auth + "/"
# r = requests.get(get_path())
while(True):
command = input().split(' ')
if(command[0] == "get"):
r = requests.get(get_path() + command[1])
print(r.text)
elif(command[0] == "put"):
r = requests.put(get_path() + command[1], json=json.loads(command[2]))
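# A hedged example session (endpoint names depend on the Nanoleaf OpenAPI; the JSON
# argument must contain no spaces because the input line is split on ' '):
#     get effects
#     put state {"brightness":{"value":50}}
# The first issues GET <base>/effects, the second PUT <base>/state with a JSON body.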
| 20.086957
| 78
| 0.58658
|
bd0c6ed5b615bda8f08b8258f4fe2e177e6e8d48
| 430
|
py
|
Python
|
packages/python/plotly/plotly/validators/icicle/domain/_row.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/icicle/domain/_row.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/icicle/domain/_row.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class RowValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="row", parent_name="icicle.domain", **kwargs):
super(RowValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
| 33.076923
| 81
| 0.644186
|
361ac8d0a619d0ebec5c64949885e9228a71353a
| 251
|
py
|
Python
|
基础教程/A1-Python与基础知识/算法第一步/ExampleCodes/chapter06/6-6.py
|
microsoft/ai-edu
|
2f59fa4d3cf19f14e0b291e907d89664bcdc8df3
|
[
"Apache-2.0"
] | 11,094
|
2019-05-07T02:48:50.000Z
|
2022-03-31T08:49:42.000Z
|
基础教程/A1-Python与基础知识/算法第一步/ExampleCodes/chapter06/6-6.py
|
microsoft/ai-edu
|
2f59fa4d3cf19f14e0b291e907d89664bcdc8df3
|
[
"Apache-2.0"
] | 157
|
2019-05-13T15:07:19.000Z
|
2022-03-23T08:52:32.000Z
|
基础教程/A1-Python与基础知识/算法第一步/ExampleCodes/chapter06/6-6.py
|
microsoft/ai-edu
|
2f59fa4d3cf19f14e0b291e907d89664bcdc8df3
|
[
"Apache-2.0"
] | 2,412
|
2019-05-07T02:55:15.000Z
|
2022-03-30T06:56:52.000Z
|
print("The price of this %s is %d dollars." % ("hotdog", 4.0))
print("The price of this %s is %s dollars." % ("hotdog", 4.0))
print("The price of this %s is %d dollars." % ("meal", 12.25))
print("The price of this %s is %s dollars." % ("meal", 12.25))
| 62.75
| 62
| 0.605578
|