from AccessControl import ClassSecurityInfo
from DateTime import DateTime
from Products.ATContentTypes.content import schemata
from Products.Archetypes import atapi
from Products.Archetypes.public import *
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.widgets import DateTimeWidget
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
schema = BikaSchema.copy() + Schema((
ReferenceField('Instrument',
allowed_types=('Instrument',),
relationship='InstrumentMaintenanceTaskInstrument',
widget=StringWidget(
visible=False,
)
),
ComputedField('InstrumentUID',
expression = 'context.getInstrument() and context.getInstrument().UID() or None',
widget=ComputedWidget(
visible=False,
),
),
StringField('Type',
vocabulary = "getMaintenanceTypes",
widget = ReferenceWidget(
checkbox_bound = 0,
label = _("Maintenance type",
"Type"),
),
),
DateTimeField('DownFrom',
with_time = 1,
with_date = 1,
required = 1,
widget = DateTimeWidget(
label=_("From"),
description=_("Date from which the instrument is under maintenance"),
show_hm = True,
),
),
DateTimeField('DownTo',
with_time = 1,
with_date = 1,
widget = DateTimeWidget(
label=_("To"),
description=_("Date until the instrument will not be available"),
show_hm = True,
),
),
StringField('Maintainer',
widget = StringWidget(
label=_("Maintainer"),
description=_("The analyst or agent responsible of the maintenance"),
)
),
TextField('Considerations',
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
label=_("Considerations"),
description=_("Remarks to take into account for maintenance process"),
),
),
TextField('WorkPerformed',
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
label=_("Work Performed"),
description=_("Description of the actions made during the maintenance process"),
),
),
TextField('Remarks',
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
label=_("Remarks"),
),
),
FixedPointField('Cost',
default = '0.00',
widget = DecimalWidget(
label=_("Price"),
),
),
BooleanField('Closed',
default = '0',
widget = BooleanWidget(
label=_("Closed"),
description=_("Set the maintenance task as closed.")
),
),
))
IdField = schema['id']
schema['description'].required = False
schema['description'].widget.visible = True
schema['description'].schemata = 'default'
# The title does not need to be unique
schema['title'].validators = ()
schema['title']._validationLayer()
class InstrumentMaintenanceTaskStatuses:
CLOSED = 'Closed'
CANCELLED = 'Cancelled'
OVERDUE = "Overdue"
PENDING = "Pending"
INQUEUE = "In queue"
class InstrumentMaintenanceTask(BaseFolder):
security = ClassSecurityInfo()
schema = schema
displayContentsTab = False
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def getMaintenanceTypes(self):
""" Return the current list of maintenance types
"""
types = [('Preventive',safe_unicode(_('Preventive')).encode('utf-8')),
('Repair', safe_unicode(_('Repair')).encode('utf-8')),
('Enhancement', safe_unicode(_('Enhancement')).encode('utf-8'))]
return DisplayList(types)
def getCurrentStateI18n(self):
return safe_unicode(_(self.getCurrentState())).encode('utf-8')
def getCurrentState(self):
workflow = getToolByName(self, 'portal_workflow')
if self.getClosed():
return InstrumentMaintenanceTaskStatuses.CLOSED
elif workflow.getInfoFor(self, 'cancellation_state', '') == 'cancelled':
return InstrumentMaintenanceTaskStatuses.CANCELLED
else:
now = DateTime()
dfrom = self.getDownFrom()
dto = self.getDownTo() and self.getDownTo() or DateTime(9999, 12, 31)
if (now > dto):
return InstrumentMaintenanceTaskStatuses.OVERDUE
if (now >= dfrom):
return InstrumentMaintenanceTaskStatuses.PENDING
else:
return InstrumentMaintenanceTaskStatuses.INQUEUE
atapi.registerType(InstrumentMaintenanceTask, PROJECTNAME)
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cinder.volume.rpcapi
"""
import ddt
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
from cinder.volume import rpcapi as volume_rpcapi
CONF = cfg.CONF
@ddt.ddt
class VolumeRPCAPITestCase(test.RPCAPITestCase):
def setUp(self):
super(VolumeRPCAPITestCase, self).setUp()
self.rpcapi = volume_rpcapi.VolumeAPI
self.base_version = '3.0'
vol = {}
vol['host'] = 'fake_host'
vol['availability_zone'] = CONF.storage_availability_zone
vol['status'] = "available"
vol['attach_status'] = "detached"
vol['metadata'] = {"test_key": "test_val"}
vol['size'] = 1
volume = db.volume_create(self.context, vol)
kwargs = {
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'display_name': 'fake_name',
'display_description': 'fake_description'}
snapshot = tests_utils.create_snapshot(self.context, vol['id'],
**kwargs)
generic_group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
group_type_id='group_type1',
host='fakehost@fakedrv#fakepool')
group_snapshot = tests_utils.create_group_snapshot(
self.context,
group_id=generic_group.id,
group_type_id=fake.GROUP_TYPE_ID)
self.fake_volume = jsonutils.to_primitive(volume)
self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
self.fake_snapshot = snapshot
self.fake_reservations = ["RESERVATION"]
self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)
self.fake_group = generic_group
self.fake_group_snapshot = group_snapshot
self.can_send_version_mock = self.patch(
'oslo_messaging.RPCClient.can_send_version', return_value=True)
def tearDown(self):
super(VolumeRPCAPITestCase, self).tearDown()
self.fake_snapshot.destroy()
self.fake_volume_obj.destroy()
self.fake_group_snapshot.destroy()
self.fake_group.destroy()
self.fake_backup_obj.destroy()
def _change_cluster_name(self, resource, cluster_name):
resource.cluster_name = cluster_name
resource.obj_reset_changes()
def test_create_volume(self):
self._test_rpc_api('create_volume',
rpc_method='cast',
server='fake_host',
volume=self.fake_volume_obj,
request_spec=objects.RequestSpec.from_primitives(
{}),
filter_properties={'availability_zone': 'fake_az'},
allow_reschedule=True)
@ddt.data(None, 'my_cluster')
def test_delete_volume(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('delete_volume',
rpc_method='cast',
server=cluster_name or self.fake_volume_obj.host,
volume=self.fake_volume_obj,
unmanage_only=False,
cascade=False)
def test_delete_volume_cascade(self):
self._test_rpc_api('delete_volume',
rpc_method='cast',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
unmanage_only=False,
cascade=True)
@ddt.data(None, 'mycluster')
def test_create_snapshot(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('create_snapshot',
rpc_method='cast',
server=cluster_name or self.fake_volume_obj.host,
volume=self.fake_volume_obj,
snapshot=self.fake_snapshot)
@ddt.data(None, 'mycluster')
def test_delete_snapshot(self, cluster_name):
self._change_cluster_name(self.fake_snapshot.volume, cluster_name)
self._test_rpc_api(
'delete_snapshot', rpc_method='cast',
server=cluster_name or self.fake_snapshot.volume.host,
snapshot=self.fake_snapshot, unmanage_only=False)
def test_delete_snapshot_with_unmanage_only(self):
self._test_rpc_api('delete_snapshot',
rpc_method='cast',
server=self.fake_snapshot.volume.host,
snapshot=self.fake_snapshot,
unmanage_only=True)
@ddt.data('3.0', '3.3')
def test_attach_volume_to_instance(self, version):
self.can_send_version_mock.return_value = (version == '3.3')
self._test_rpc_api('attach_volume',
rpc_method='call',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
instance_uuid=fake.INSTANCE_ID,
host_name=None,
mountpoint='fake_mountpoint',
mode='ro',
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id},
retval=fake_volume.fake_db_volume_attachment(),
version=version)
@ddt.data('3.0', '3.3')
def test_attach_volume_to_host(self, version):
self.can_send_version_mock.return_value = (version == '3.3')
self._test_rpc_api('attach_volume',
rpc_method='call',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
mode='rw',
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id},
retval=fake_volume.fake_db_volume_attachment(),
version=version)
@ddt.data('3.0', '3.3')
def test_attach_volume_cluster(self, version):
self.can_send_version_mock.return_value = (version == '3.3')
self._change_cluster_name(self.fake_volume_obj, 'mycluster')
self._test_rpc_api('attach_volume',
rpc_method='call',
server=self.fake_volume_obj.cluster_name,
volume=self.fake_volume_obj,
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
mode='rw',
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id},
retval=fake_volume.fake_db_volume_attachment(),
version=version)
@ddt.data('3.0', '3.4')
def test_detach_volume(self, version):
self.can_send_version_mock.return_value = (version == '3.4')
self._test_rpc_api('detach_volume',
rpc_method='call',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
attachment_id=fake.ATTACHMENT_ID,
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id},
# NOTE(dulek): Detach isn't returning anything, but
# it's a call and it is synchronous.
retval=None,
version=version)
@ddt.data('3.0', '3.4')
def test_detach_volume_cluster(self, version):
self.can_send_version_mock.return_value = (version == '3.4')
self._change_cluster_name(self.fake_volume_obj, 'mycluster')
self._test_rpc_api('detach_volume',
rpc_method='call',
server=self.fake_volume_obj.cluster_name,
volume=self.fake_volume_obj,
attachment_id='fake_uuid',
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id},
# NOTE(dulek): Detach isn't returning anything, but
# it's a call and it is synchronous.
retval=None,
version=version)
@ddt.data(None, 'mycluster')
def test_copy_volume_to_image(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('copy_volume_to_image',
rpc_method='cast',
server=cluster_name or self.fake_volume_obj.host,
volume=self.fake_volume_obj,
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id},
image_meta={'id': fake.IMAGE_ID,
'container_format': 'fake_type',
'disk_format': 'fake_format'})
@ddt.data(None, 'mycluster')
def test_initialize_connection(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('initialize_connection',
rpc_method='call',
server=cluster_name or self.fake_volume_obj.host,
connector='fake_connector',
volume=self.fake_volume_obj)
@ddt.data(None, 'mycluster')
def test_terminate_connection(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('terminate_connection',
rpc_method='call',
server=cluster_name or self.fake_volume_obj.host,
volume=self.fake_volume_obj,
connector='fake_connector',
force=False,
# NOTE(dulek): Terminate isn't returning anything,
# but it's a call and it is synchronous.
retval=None,
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id})
@ddt.data(None, 'mycluster')
def test_accept_transfer(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('accept_transfer',
rpc_method='call',
server=cluster_name or self.fake_volume_obj.host,
volume=self.fake_volume_obj,
new_user=fake.USER_ID,
new_project=fake.PROJECT_ID,
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id})
@ddt.data(None, 'mycluster')
def test_extend_volume(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('extend_volume',
rpc_method='cast',
server=cluster_name or self.fake_volume_obj.host,
volume=self.fake_volume_obj,
new_size=1,
reservations=self.fake_reservations)
def test_migrate_volume(self):
class FakeBackend(object):
def __init__(self):
self.host = 'fake_host'
self.cluster_name = 'cluster_name'
self.capabilities = {}
dest_backend = FakeBackend()
self._test_rpc_api('migrate_volume',
rpc_method='cast',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
dest_backend=dest_backend,
force_host_copy=True,
expected_kwargs_diff={
'host': {'host': 'fake_host',
'cluster_name': 'cluster_name',
'capabilities': {}}},
version='3.5')
def test_migrate_volume_completion(self):
self._test_rpc_api('migrate_volume_completion',
rpc_method='call',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
new_volume=self.fake_volume_obj,
error=False,
retval=fake.VOLUME_ID)
def test_retype(self):
class FakeBackend(object):
def __init__(self):
self.host = 'fake_host'
self.cluster_name = 'cluster_name'
self.capabilities = {}
dest_backend = FakeBackend()
self._test_rpc_api('retype',
rpc_method='cast',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
new_type_id=fake.VOLUME_TYPE_ID,
dest_backend=dest_backend,
migration_policy='never',
reservations=self.fake_reservations,
old_reservations=self.fake_reservations,
expected_kwargs_diff={
'host': {'host': 'fake_host',
'cluster_name': 'cluster_name',
'capabilities': {}}},
version='3.5')
def test_manage_existing(self):
self._test_rpc_api('manage_existing',
rpc_method='cast',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
ref={'lv_name': 'foo'})
def test_manage_existing_snapshot(self):
self._test_rpc_api('manage_existing_snapshot',
rpc_method='cast',
server=self.fake_snapshot.volume.host,
snapshot=self.fake_snapshot,
ref='foo',
backend='fake_host')
def test_freeze_host(self):
service = fake_service.fake_service_obj(self.context,
host='fake_host',
binary='cinder-volume')
self._test_rpc_api('freeze_host',
rpc_method='call',
server='fake_host',
service=service,
retval=True)
def test_thaw_host(self):
service = fake_service.fake_service_obj(self.context,
host='fake_host',
binary='cinder-volume')
self._test_rpc_api('thaw_host',
rpc_method='call',
server='fake_host',
service=service,
retval=True)
@ddt.data('3.0', '3.8')
def test_failover(self, version):
self.can_send_version_mock.side_effect = lambda x: x == version
service = objects.Service(self.context, host='fake_host',
cluster_name=None)
expected_method = 'failover' if version == '3.8' else 'failover_host'
self._test_rpc_api('failover', rpc_method='cast',
expected_method=expected_method, server='fake_host',
service=service,
secondary_backend_id='fake_backend',
version=version)
@mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt')
def test_failover_completed(self, cctxt_mock):
service = objects.Service(self.context, host='fake_host',
cluster_name='cluster_name')
self._test_rpc_api('failover_completed', rpc_method='cast',
fanout=True, server='fake_host', service=service,
updates=mock.sentinel.updates)
def test_get_capabilities(self):
self._test_rpc_api('get_capabilities',
rpc_method='call',
server='fake_host',
backend_id='fake_host',
discover=True,
retval={'foo': 'bar'})
def test_remove_export(self):
self._test_rpc_api('remove_export',
rpc_method='cast',
server=self.fake_volume_obj.host,
volume=self.fake_volume_obj,
expected_kwargs_diff={
'volume_id': self.fake_volume_obj.id})
@ddt.data(None, 'mycluster')
def test_get_backup_device(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
backup_device_dict = {'backup_device': self.fake_volume,
'is_snapshot': False,
'secure_enabled': True}
backup_device_obj = objects.BackupDeviceInfo.from_primitive(
backup_device_dict, self.context)
self._test_rpc_api('get_backup_device',
rpc_method='call',
server=cluster_name or self.fake_volume_obj.host,
backup=self.fake_backup_obj,
volume=self.fake_volume_obj,
expected_kwargs_diff={
'want_objects': True,
},
retval=backup_device_obj,
version='3.2')
@ddt.data(None, 'mycluster')
def test_get_backup_device_old(self, cluster_name):
self.can_send_version_mock.side_effect = (True, False, False)
self._change_cluster_name(self.fake_volume_obj, cluster_name)
backup_device_dict = {'backup_device': self.fake_volume,
'is_snapshot': False,
'secure_enabled': True}
backup_device_obj = objects.BackupDeviceInfo.from_primitive(
backup_device_dict, self.context)
self._test_rpc_api('get_backup_device',
rpc_method='call',
server=cluster_name or self.fake_volume_obj.host,
backup=self.fake_backup_obj,
volume=self.fake_volume_obj,
retval=backup_device_dict,
expected_retval=backup_device_obj,
version='3.0')
@ddt.data(None, 'mycluster')
def test_secure_file_operations_enabled(self, cluster_name):
self._change_cluster_name(self.fake_volume_obj, cluster_name)
self._test_rpc_api('secure_file_operations_enabled',
rpc_method='call',
server=cluster_name or self.fake_volume_obj.host,
volume=self.fake_volume_obj,
retval=True)
def test_create_group(self):
self._test_rpc_api('create_group', rpc_method='cast',
server='fakehost@fakedrv', group=self.fake_group)
@ddt.data(None, 'mycluster')
def test_delete_group(self, cluster_name):
self._change_cluster_name(self.fake_group, cluster_name)
self._test_rpc_api('delete_group', rpc_method='cast',
server=cluster_name or self.fake_group.host,
group=self.fake_group)
@ddt.data(None, 'mycluster')
def test_update_group(self, cluster_name):
self._change_cluster_name(self.fake_group, cluster_name)
self._test_rpc_api('update_group', rpc_method='cast',
server=cluster_name or self.fake_group.host,
group=self.fake_group,
add_volumes=[fake.VOLUME2_ID],
remove_volumes=[fake.VOLUME3_ID])
def test_create_group_from_src(self):
self._test_rpc_api('create_group_from_src', rpc_method='cast',
server=self.fake_group.host, group=self.fake_group,
group_snapshot=self.fake_group_snapshot,
source_group=None)
def test_create_group_snapshot(self):
self._test_rpc_api('create_group_snapshot', rpc_method='cast',
server=self.fake_group_snapshot.group.host,
group_snapshot=self.fake_group_snapshot)
def test_delete_group_snapshot(self):
self._test_rpc_api('delete_group_snapshot', rpc_method='cast',
server=self.fake_group_snapshot.group.host,
group_snapshot=self.fake_group_snapshot)
@ddt.data(('myhost', None), ('myhost', 'mycluster'))
@ddt.unpack
@mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt')
def test_do_cleanup(self, host, cluster, get_cctxt_mock):
cleanup_request = objects.CleanupRequest(self.context,
host=host,
cluster_name=cluster)
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.do_cleanup(self.context, cleanup_request)
get_cctxt_mock.assert_called_once_with(
cleanup_request.service_topic_queue, '3.7')
get_cctxt_mock.return_value.cast.assert_called_once_with(
self.context, 'do_cleanup', cleanup_request=cleanup_request)
def test_do_cleanup_too_old(self):
cleanup_request = objects.CleanupRequest(self.context)
rpcapi = volume_rpcapi.VolumeAPI()
with mock.patch.object(rpcapi.client, 'can_send_version',
return_value=False) as can_send_mock:
self.assertRaises(exception.ServiceTooOld,
rpcapi.do_cleanup,
self.context,
cleanup_request)
can_send_mock.assert_called_once_with('3.7')
@ddt.data(('myhost', None, '3.10'), ('myhost', 'mycluster', '3.10'),
('myhost', None, '3.0'))
@ddt.unpack
@mock.patch('oslo_messaging.RPCClient.can_send_version')
def test_get_manageable_volumes(
self,
host,
cluster_name,
version,
can_send_version):
can_send_version.side_effect = lambda x: x == version
service = objects.Service(self.context, host=host,
cluster_name=cluster_name)
expected_kwargs_diff = {
'want_objects': True} if version == '3.10' else {}
self._test_rpc_api('get_manageable_volumes',
rpc_method='call',
service=service,
server=cluster_name or host,
marker=5,
limit=20,
offset=5,
sort_keys='fake_keys',
sort_dirs='fake_dirs',
expected_kwargs_diff=expected_kwargs_diff,
version=version)
can_send_version.assert_has_calls([mock.call('3.10')])
@ddt.data(('myhost', None, '3.10'), ('myhost', 'mycluster', '3.10'),
('myhost', None, '3.0'))
@ddt.unpack
@mock.patch('oslo_messaging.RPCClient.can_send_version')
def test_get_manageable_snapshots(
self,
host,
cluster_name,
version,
can_send_version):
can_send_version.side_effect = lambda x: x == version
service = objects.Service(self.context, host=host,
cluster_name=cluster_name)
expected_kwargs_diff = {
'want_objects': True} if version == '3.10' else {}
self._test_rpc_api('get_manageable_snapshots',
rpc_method='call',
service=service,
server=cluster_name or host,
marker=5,
limit=20,
offset=5,
sort_keys='fake_keys',
sort_dirs='fake_dirs',
expected_kwargs_diff=expected_kwargs_diff,
version=version)
can_send_version.assert_has_calls([mock.call('3.10')])
@mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock())
def test_set_log_levels(self):
service = objects.Service(self.context, host='host1')
self._test_rpc_api('set_log_levels',
rpc_method='cast',
server=service.host,
service=service,
log_request='log_request',
version='3.12')
@mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock())
def test_get_log_levels(self):
service = objects.Service(self.context, host='host1')
self._test_rpc_api('get_log_levels',
rpc_method='call',
server=service.host,
service=service,
log_request='log_request',
version='3.12')
@ddt.data(None, 'mycluster')
def test_initialize_connection_snapshot(self, cluster_name):
self._change_cluster_name(self.fake_snapshot.volume, cluster_name)
self._test_rpc_api('initialize_connection_snapshot',
rpc_method='call',
server=(cluster_name or
self.fake_snapshot.volume.host),
connector='fake_connector',
snapshot=self.fake_snapshot,
expected_kwargs_diff={
'snapshot_id': self.fake_snapshot.id},
version='3.13')
@ddt.data(None, 'mycluster')
def test_terminate_connection_snapshot(self, cluster_name):
self._change_cluster_name(self.fake_snapshot.volume, cluster_name)
self._test_rpc_api('terminate_connection_snapshot',
rpc_method='call',
server=(cluster_name or
self.fake_snapshot.volume.host),
snapshot=self.fake_snapshot,
connector='fake_connector',
force=False,
retval=None,
expected_kwargs_diff={
'snapshot_id': self.fake_snapshot.id},
version='3.13')
def test_remove_export_snapshot(self):
self._test_rpc_api('remove_export_snapshot',
rpc_method='cast',
server=self.fake_volume_obj.host,
snapshot=self.fake_snapshot,
expected_kwargs_diff={
'snapshot_id': self.fake_snapshot.id},
version='3.13')
def test_enable_replication(self):
self._test_rpc_api('enable_replication', rpc_method='cast',
server=self.fake_group.host,
group=self.fake_group,
version='3.14')
def test_disable_replication(self):
self._test_rpc_api('disable_replication', rpc_method='cast',
server=self.fake_group.host,
group=self.fake_group,
version='3.14')
def test_failover_replication(self):
self._test_rpc_api('failover_replication', rpc_method='cast',
server=self.fake_group.host,
group=self.fake_group,
allow_attached_volume=False,
secondary_backend_id=None,
version='3.14')
def test_list_replication_targets(self):
self._test_rpc_api('list_replication_targets', rpc_method='call',
server=self.fake_group.host,
group=self.fake_group,
version='3.14')
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from .choices import *
class User(AbstractUser):
email = models.EmailField(null=True, default=None)
USER_TYPE = (
('st', 'Студент'),
('com', 'Компания'),
('unv', 'Университет')
)
user_type = models.CharField(choices=USER_TYPE, max_length=80)
class Student(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
first_name = models.CharField(max_length=40)
last_name = models.CharField(max_length=40)
patronymic = models.CharField(max_length=40, null=True, blank=True)
city = models.CharField(max_length=40, null=True, blank=True)
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
class Company(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=80)
foundation_year = models.IntegerField(null=True, default=0)
description = models.TextField(null=True, default=None)
verified = models.BooleanField(default=False)
def __str__(self):
return '%s' % (self.name)
class SubjectArea(models.Model):
title = models.CharField(max_length=40)
def __str__(self):
return '%s' % (self.title)
class Competency(models.Model):
title = models.CharField(max_length=150)
subject_area = models.ForeignKey(SubjectArea, on_delete=models.CASCADE)
def __str__(self):
return '%s' % (self.title)
class StudentCompetency(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE)
competency = models.ForeignKey(Competency, on_delete=models.CASCADE)
who_verified = models.ManyToManyField(User, blank=True)
class Meta:
unique_together = ('student', 'competency',)
class University(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=40)
foundation_year = models.IntegerField()
description = models.TextField(null=True)
city = models.CharField(max_length=40, null=True, blank=True)
verified = models.BooleanField(default=False)
bg_src = models.CharField(max_length=200, null=True, blank=True, default='http://2bddea4c.ngrok.io/static/img/no-image.jpg')
icon_src = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return '%s' % (self.title)
class UniversityProgram(models.Model):
university = models.ForeignKey(University, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
description = models.TextField(null=True)
degree = models.CharField(choices=DEGREE, default=None, max_length=3)
competencies = models.ManyToManyField(Competency)
def __str__(self):
return '%s' % (self.title)
class StudentEducation(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE)
university = models.ForeignKey(University, on_delete=models.CASCADE)
degree = models.CharField(choices=DEGREE, default=None, max_length=3)
program = models.ForeignKey(UniversityProgram, on_delete=models.CASCADE)
year_start = models.IntegerField()
year_end = models.IntegerField()
verified = models.BooleanField(default=False)
class StudentJob(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE)
company = models.ForeignKey(Company, on_delete=models.CASCADE)
year_start = models.IntegerField()
year_end = models.IntegerField()
position = models.CharField(max_length=60)
verified = models.BooleanField(default=False)
class SubjectAreaVacancies(models.Model):
title = models.CharField(max_length=40)
def __str__(self):
return '%s' % (self.title)
class Vacancy(models.Model):
company = models.ForeignKey(Company, on_delete=models.CASCADE)
title = models.CharField(max_length=40)
description = models.TextField(null=True)
competencies = models.ManyToManyField(Competency)
subject_area = models.ForeignKey(SubjectAreaVacancies, on_delete=models.CASCADE, null=True, default=None)
|
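# Hex-tile flipping puzzle (the move set e/w/ne/nw/se/sw and the 100-round
# automaton below suggest an Advent-of-Code-style problem). Tiles are keyed by
# a skewed (east, north) coordinate: an e/w step changes east by +/-1.0, while
# the four diagonal steps change both east and north by +/-0.5, so every hex
# centre gets a unique coordinate pair.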
tile_colors = {}
for _ in range(400):
path = input()
i = 0
pos_north = 0.0
pos_east = 0.0
while i < len(path):
if path[i] == 'e':
pos_east += 1.0
i += 1
elif path[i] == 'w':
pos_east -= 1.0
i += 1
elif path[i] == 'n' and path[i+1] == 'e':
pos_north += 0.5
pos_east += 0.5
i += 2
elif path[i] == 's' and path[i+1] == 'e':
pos_north -= 0.5
pos_east += 0.5
i += 2
elif path[i] == 's' and path[i+1] == 'w':
pos_north -= 0.5
pos_east -= 0.5
i += 2
else:
pos_north += 0.5
pos_east -= 0.5
i += 2
pos = (pos_east, pos_north)
if pos in tile_colors:
tile_colors[pos] = 'black' if tile_colors.get(pos) == 'white' else 'white'
else:
tile_colors[pos] = 'black'
neighbors = [(-1, 0), (1, 0), (0.5, 0.5), (0.5, -0.5), (-0.5, 0.5), (-0.5, -0.5)]
for i in range(100):
min_east = min(map(lambda x: x[0], tile_colors.keys())) - 1.0
max_east = max(map(lambda x: x[0], tile_colors.keys())) + 1.0
min_north = min(map(lambda x: x[1], tile_colors.keys())) - 0.5
max_north = max(map(lambda x: x[1], tile_colors.keys())) + 0.5
a = min_east
while a <= max_east:
b = min_north
while b <= max_north:
if not((a == int(a)) ^ (b == int(b))) and (a, b) not in tile_colors:
tile_colors[(a, b)] = 'white'
b += 0.5
a += 0.5
new_tile = {}
for pos in tile_colors.keys():
counter = 0
for step in neighbors:
neighbor_pos = (pos[0] + step[0], pos[1] + step[1])
if neighbor_pos in tile_colors and tile_colors.get(neighbor_pos) == 'black':
counter += 1
if tile_colors.get(pos) == 'black':
new_tile[pos] = 'white' if counter == 0 or counter > 2 else 'black'
else:
new_tile[pos] = 'black' if counter == 2 else 'white'
tile_colors = new_tile
counter = 0
for pos in tile_colors.keys():
if tile_colors.get(pos) == 'black':
counter += 1
print(counter)
|
#!/usr/bin/env python
__author__ = "Mageswaran Dhandapani"
__copyright__ = "Copyright 2020, The Spark Structured Playground Project"
__credits__ = []
__license__ = "Apache License"
__version__ = "2.0"
__maintainer__ = "Mageswaran Dhandapani"
__email__ = "mageswaran1989@gmail.com"
__status__ = "Education Purpose"
import pandas as pd
import gin
from sklearn.base import BaseEstimator, TransformerMixin
import nltk
from snorkel.labeling import labeling_function
from snorkel.labeling import LFApplier
from snorkel.labeling import LFAnalysis
from snorkel.labeling import LabelModel
from ssp.logger.pretty_print import print_error
from ssp.logger.pretty_print import print_info
from ssp.posgress.dataset_base import PostgresqlDatasetBase
from ssp.utils.ai_key_words import AIKeyWords
class SSPTweetLabeller(BaseEstimator, TransformerMixin):
"""
Snorkel transformer that uses labeling functions (LFs) to train a LabelModel, which can annotate text as AI-related or not
:param input_col: Name of the input text column if a DataFrame is used
:param output_col: Name of the output label column if a DataFrame is used
"""
# Set voting values.
# all other tweets
ABSTAIN = -1
# tweets that talk about science, AI and data
POSITIVE = 1
# tweets that are not about those topics
NEGATIVE = 0
def __init__(self,
input_col="text",
output_col="slabel"):
# LFs need to be static methods or plain functions
self._labelling_functions = [self.is_ai_tweet,
self.is_not_ai_tweet,
self.not_data_science,
self.not_neural_network,
self.not_big_data,
self.not_nlp,
self.not_ai,
self.not_cv]
self._input_col = input_col
self._output_col = output_col
self._list_applier = LFApplier(lfs=self._labelling_functions)
self._label_model = LabelModel(cardinality=2, verbose=True)
def fit(self, X, y=None):
"""
:param X: (DataFrame) / (list) / (str) Input text
:param y: None (unused; kept for scikit-learn compatibility)
:return: self, with the internal LabelModel fitted on the LF label matrix
"""
if isinstance(X, str):
X = [X]
if isinstance(X, pd.DataFrame):
text_list = X[self._input_col]
X_labels = self._list_applier.apply(text_list)
print_info(LFAnalysis(L=X_labels, lfs=self._labelling_functions).lf_summary())
print_info("Training LabelModel")
self._label_model.fit(L_train=X_labels, n_epochs=500, log_freq=100, seed=42)
elif isinstance(X, list):
X_labels = self._list_applier.apply(X)
print_info(LFAnalysis(L=X_labels, lfs=self._labelling_functions).lf_summary())
print_info("Training LabelModel")
self._label_model.fit(L_train=X_labels, n_epochs=500, log_freq=100, seed=42)
else:
raise RuntimeError("Unknown type...")
return self
def normalize_prob(self, res):
return [1 if r > 0.5 else 0 for r in res]
def transform(self, X, y=None):
if isinstance(X, pd.DataFrame):
if self._input_col:
res = self.predict(X[self._input_col])[:, 1]
X[self._output_col] = self.normalize_prob(res)
return X
elif isinstance(X, list):
res = self.predict(X)[:, 1]
return self.normalize_prob(res)
elif isinstance(X, str):
res = self.predict([X])[:, 1]
return self.normalize_prob(res)[0]
def predict(self, X):
return self._label_model.predict_proba(L=self._list_applier.apply(X))
def evaluate(self, X, y):
if isinstance(X, list):
X_labels = self._list_applier.apply(X)
label_model_acc = self._label_model.score(L=X_labels, Y=y, tie_break_policy="random")[
"accuracy"
]
print_info(LFAnalysis(L=X_labels, lfs=self._labelling_functions).lf_summary())
print(f"{'Label Model Accuracy:':<25} {label_model_acc * 100:.1f}%")
elif isinstance(X, pd.DataFrame):
text_list = X[self._input_col]
X_labels = self._list_applier.apply(text_list)
label_model_acc = self._label_model.score(L=X_labels, Y=y, tie_break_policy="random")[
"accuracy"
]
print(f"{'Label Model Accuracy:':<25} {label_model_acc * 100:.1f}%")
else:
raise RuntimeError("Unknown type...")
@staticmethod
def positive_search(data, key_words):
data = data.replace("#", "").replace("@", "")
for keyword in key_words:
if f' {keyword.lower()} ' in f' {data.lower()} ':
return SSPTweetLabeller.POSITIVE
return SSPTweetLabeller.ABSTAIN
@staticmethod
def negative_search(data, positive_keywords, false_positive_keywords):
data = data.replace("#", "").replace("@", "")
positive = False
false_positive = False
for keyword in positive_keywords:
if f' {keyword.lower()} ' in f' {data.lower()} ':
positive = True
for keyword in false_positive_keywords:
if f' {keyword.lower()} ' in f' {data.lower()} ':
false_positive = True
if false_positive and not positive:
# print_info(data)
return SSPTweetLabeller.NEGATIVE
return SSPTweetLabeller.ABSTAIN
@staticmethod
def bigram_check(x, word1, word2):
# Build word bigrams and flag cases where word1 appears without word2 alongside it
bigrm = list(nltk.bigrams(x.split()))
bigrm = list(map(' '.join, bigrm))
count = 0
for pair in bigrm:
if word1 in pair and word2 not in pair:
count += 1
if count > 0:
return SSPTweetLabeller.NEGATIVE
else:
return SSPTweetLabeller.ABSTAIN
@staticmethod
@labeling_function()
def is_ai_tweet(x):
return SSPTweetLabeller.positive_search(x, AIKeyWords.POSITIVE.split("|"))
@staticmethod
@labeling_function()
def is_not_ai_tweet(x):
return SSPTweetLabeller.negative_search(data=x,
positive_keywords=AIKeyWords.POSITIVE.split("|"),
false_positive_keywords=AIKeyWords.FALSE_POSITIVE.split("|"))
@staticmethod
@labeling_function()
def not_data_science(x):
return SSPTweetLabeller.bigram_check(x, "data", "science")
@staticmethod
@labeling_function()
def not_neural_network(x):
return SSPTweetLabeller.bigram_check(x, "neural", "network")
@staticmethod
@labeling_function()
def not_big_data(x):
return SSPTweetLabeller.bigram_check(x, "big", "data")
@staticmethod
@labeling_function()
def not_nlp(x):
return SSPTweetLabeller.bigram_check(x, "natural", "language")
@staticmethod
@labeling_function()
def not_ai(x):
return SSPTweetLabeller.bigram_check(x, "artificial", "intelligence")
@staticmethod
@labeling_function()
def not_cv(x):
return SSPTweetLabeller.bigram_check(x, "computer", "vision")
@gin.configurable
class SSPLabelEvaluator(PostgresqlDatasetBase):
def __init__(self,
text_column="text",
label_column="label",
raw_tweet_table_name_prefix="raw_tweet_dataset",
postgresql_host="localhost",
postgresql_port="5432",
postgresql_database="sparkstreamingdb",
postgresql_user="sparkstreaming",
postgresql_password="sparkstreaming"):
PostgresqlDatasetBase.__init__(self,
text_column=text_column,
label_output_column=label_column,
raw_tweet_table_name_prefix=raw_tweet_table_name_prefix,
postgresql_host=postgresql_host,
postgresql_port=postgresql_port,
postgresql_database=postgresql_database,
postgresql_user=postgresql_user,
postgresql_password=postgresql_password)
self._snorkel_labeler = SSPTweetLabeller()
def run_labeler(self, version=0):
raw_tweet_dataset_df_deduplicated, test_df, dev_df, \
snorkel_train_df, train_df = self.get_processed_datasets(version=version)
self._snorkel_labeler.fit(snorkel_train_df)
self._snorkel_labeler.evaluate(test_df, test_df[self._label_output_column])
# snorkel_train_df["label"] = snorkel_train_df["text"].apply(lambda x: SSPTweetLabeller.is_ai_tweet(x))
# print_info(snorkel_train_df["label"].value_counts())
# print_error(snorkel_train_df[snorkel_train_df["label"]==0]["text"].tolist()[:10])
# print_info(snorkel_train_df[snorkel_train_df["label"]==1]["text"].tolist()[:10])
# res = self._snorkel_labeler.predict(train_df[self._text_column])
# res = res[:, 1]
# res = [1 if r >= 0.5 else 0 for r in res]
# print_error(train_df.shape[0])
# print_info(sum(res))
# train_df["snorkel_label"] = res
# for label, group in train_df[["text", "snorkel_label"]].groupby("snorkel_label"):
# if label == 1:
# print(label)
# print_info(group.shape[0])
# group.reset_index(inplace=True)
# # print_info("\n".join(group["text"].tolist()[:10]))
# group["label"] = group["text"].apply(lambda x: SSPTweetLabeller.is_ai_tweet(x))
# print_info("\n".join(group[group["label"]==1]["text"].tolist()[:100]))
|
from stack import *
class MinStack:
def __init__(self):
self.dataStk = ArrayStack()
self.minStk = ArrayStack()
def pop(self):
data = self.dataStk.pop()
# If the popped value is the current minimum, retire it from minStk too.
if data == self.minStk.peek():
self.minStk.pop()
return data
def push(self, val):
# Track the running minimum: push onto minStk whenever the new value is
# less than or equal to the current minimum (or minStk is empty).
if self.minStk.isEmpty() or val <= self.minStk.peek():
self.minStk.push(val)
self.dataStk.push(val)
def getMin(self):
return self.minStk.peek()
def peek(self):
return self.dataStk.peek()
if __name__ == "__main__":
mStk = MinStack()
mStk.push(14)
mStk.push(16)
mStk.push(14)
mStk.push(43)
mStk.push(15)
mStk.push(13)
mStk.push(13)
mStk.push(12)
print("top of mstk ", mStk.peek())
print("min - ", mStk.getMin())
print("pop - ", mStk.pop())
print("min - ", mStk.getMin())
print("pop - ", mStk.pop())
print("min - ", mStk.getMin())
print("pop - ", mStk.pop())
print("min - ", mStk.getMin())
"""
"""
|
import pytest
import json
import bionic as bn
from bionic import interpret
from bionic.utils.urls import (
path_from_url,
is_file_url,
is_gcs_url,
)
class CacheTester:
"""
A helper class for testing changes to Bionic's cache.
Tracks the current and previous states of the Cache API, allowing us to express
tests in terms of changes between states.
"""
def __init__(self, flow, tier=["local", "cloud"], gcs_fs=None):
self.flow = flow
self.gcs_fs = gcs_fs
self._old_entries = set()
self._tiers = interpret.str_or_seq_as_list(tier)
def expect_new_entries(self, *expected_new_entity_names):
cur_entries = set(self._get_entries())
assert cur_entries.issuperset(self._old_entries)
new_entries = set(cur_entries) - self._old_entries
self._old_entries = cur_entries
new_entity_names = {entry.entity for entry in new_entries}
assert new_entity_names == set(expected_new_entity_names)
for entry in new_entries:
self._validate_entry(entry)
def expect_removed_entries(self, *expected_removed_entity_names):
cur_entries = set(self._get_entries())
assert cur_entries.issubset(self._old_entries)
removed_entries = self._old_entries - set(cur_entries)
self._old_entries = cur_entries
removed_entity_names = {entry.entity for entry in removed_entries}
assert removed_entity_names == set(expected_removed_entity_names)
def expect_same_entries(self):
assert set(self._get_entries()) == self._old_entries
def expect_zero_entries(self):
assert list(self._get_entries()) == []
def _get_entries(self):
return [
entry
for entry in self.flow.cache.get_entries()
if entry.tier in self._tiers
]
def _validate_entry(self, entry):
artifact_bytes = read_bytes_from_url(entry.artifact_url, self.gcs_fs)
value = json.loads(artifact_bytes)
assert value == self.flow.get(entry.entity)
if entry.tier == "local":
artifact_path_bytes = entry.artifact_path.read_bytes()
assert artifact_path_bytes == artifact_bytes
else:
assert entry.artifact_path is None
# We won't make too many assumptions about the format of the metadata, but we
# can check that it contains the entity name. (Unfortunately it won't
# necessarily contain the absolute artifact URL; it may be a relative URL
# instead.)
# TODO Hmm, is the above true? On closer inspection, it looks like artifact URLs
# are derelativized right away when we load the metadata YAML.
metadata_str = read_bytes_from_url(entry.metadata_url, self.gcs_fs).decode(
"utf-8"
)
assert entry.entity in metadata_str
def read_bytes_from_url(url, gcs_fs):
"""Reads the contents of a URL and returns them as a bytes object."""
if is_file_url(url):
path = path_from_url(url)
return path.read_bytes()
elif is_gcs_url(url):
return gcs_fs.cat_file(url)
else:
raise AssertionError(f"Unexpected scheme in URL: {url}")
@pytest.fixture
def preset_flow(builder):
builder.assign("x", 2)
builder.assign("y", 3)
@builder
def xy(x, y):
return x * y
@builder
def xy_squared(xy):
return xy ** 2
return builder.build()
def test_get_entries(preset_flow):
tester = CacheTester(preset_flow)
tester.expect_zero_entries()
tester.flow.get("x")
tester.expect_new_entries("x")
tester.flow.get("xy")
tester.expect_new_entries("y", "xy")
tester.flow.get("xy_squared")
tester.expect_new_entries("xy_squared")
tester.flow = tester.flow.setting("x", 4)
tester.flow.get("xy_squared")
tester.expect_new_entries("x", "xy", "xy_squared")
builder = tester.flow.to_builder()
@builder # noqa: F811
@bn.version(1)
def xy(x, y): # noqa: F811
return x ** y
tester.flow = builder.build()
tester.flow.get("xy_squared")
tester.expect_new_entries("xy", "xy_squared")
def test_entry_delete(preset_flow):
tester = CacheTester(preset_flow)
tester.flow.get("xy_squared")
tester.expect_new_entries("x", "y", "xy", "xy_squared")
(x_entry,) = [
entry for entry in tester.flow.cache.get_entries() if entry.entity == "x"
]
assert x_entry.delete()
tester.expect_removed_entries("x")
assert not x_entry.delete()
(xy_entry,) = [
entry for entry in tester.flow.cache.get_entries() if entry.entity == "xy"
]
assert xy_entry.delete()
tester.expect_removed_entries("xy")
assert not xy_entry.delete()
tester.flow = tester.flow.to_builder().build()
tester.flow.get("xy_squared")
tester.expect_new_entries("x", "xy")
def test_flow_handles_delete_gracefully(builder):
builder.assign("a", 1)
@builder
@bn.memoize(False)
def b(a):
return a + 1
@builder
@bn.memoize(False)
def c(b):
return b + 1
flow = builder.build()
flow.get("b")
(b_entry,) = [entry for entry in flow.cache.get_entries() if entry.entity == "b"]
b_entry.delete()
# The goal here is to make sure that Bionic correctly updates its cache state,
# detects that `b` is deleted, and recomputes it.
flow.get("c")
@pytest.mark.needs_parallel
@pytest.mark.allows_parallel
def test_flow_handles_intermediate_deletion_in_parallel(builder, make_counter):
builder.assign("x", 2)
@builder
@bn.persist(False)
def y(x):
return x + 1
@builder
def z(y):
return y + 1
flow = builder.build()
assert flow.get("x") == 2
assert flow.get("y") == 3
for entry in flow.cache.get_entries():
entry.delete()
assert flow.get("z") == 4
@pytest.mark.allows_parallel
def test_flow_handles_partial_tuple_deletion(builder, make_counter):
builder.assign("x", 2)
@builder
@bn.outputs("y", "z")
def _(x):
return x - 1, x + 1
flow = builder.build()
assert flow.get("y") == 1
assert flow.get("z") == 3
(z_entry,) = [entry for entry in flow.cache.get_entries() if entry.entity == "z"]
z_entry.delete()
assert flow.get("y") == 1
assert flow.get("z") == 3
def test_delete_artifact_with_multiple_metadata_files(builder):
builder.assign("a", 1)
@builder
@bn.memoize(False)
def b(a):
return 2
# Define `c` several times, each with non-functional version differences. This means
each definition will have a new metadata entry but the same artifact, so we'll have
# many entries pointing to the same artifact.
c_entries = []
for i in range(4):
@builder
@bn.memoize(False)
@bn.version(minor=i)
def c(b):
return b + 1
flow = builder.build()
flow.get("c")
(c_entry,) = [
entry
for entry in flow.cache.get_entries()
if entry.entity == "c" and entry not in c_entries
]
c_entries.append(c_entry)
# All the entries should have different metadata files.
assert len(set(entry.metadata_url for entry in c_entries)) == len(c_entries)
# But they should all share the same artifact file.
assert len(set(entry.artifact_url for entry in c_entries)) == 1
# This deletes the artifact and returns True.
assert c_entries[0].delete()
# The artifact is already deleted, so this returns False.
assert not c_entries[1].delete()
# This should attempt to load the last entry, detect that the artifact is missing,
# and recompute `c`.
flow.get("c")
# Finally, when we look at the cache again, the last of the original entries should
# be dropped, leaving only the most recent entry. (We computed a new artifact file,
# but it will have a different (random) URL, so the old entry will still be
# invalid.)
(final_c_entry,) = [
entry for entry in flow.cache.get_entries() if entry.entity == "c"
]
assert final_c_entry.artifact_path.exists()
# It would be nice if we could parameterize the above tests to run with or without GCS.
# However, it doesn't seem to be possible to have a parametrized fixture where only some
# of the variations depend on other fixtures; this is important because the GCS fixtures
# have important setup/teardown properties that we only want to trigger if GCS is
# enabled. (In theory it seems like `request.getfixturevalue` should be able to do
# this, but it has some kind of interaction with the parametrization of
# `parallel_execution_enabled` and breaks.) I think the way forward might be to make
# the GCS setup/teardown into `autouse` fixtures that are directly activated by the GCS
# command line flag.
@pytest.mark.needs_gcs
def test_cache_on_gcs(gcs_builder, gcs_fs):
builder = gcs_builder
builder.assign("a", 1)
@builder
def b(a):
return a + 1
@builder
def c(b):
return b + 1
flow = builder.build()
local_tester = CacheTester(flow, tier="local", gcs_fs=gcs_fs)
cloud_tester = CacheTester(flow, tier="cloud", gcs_fs=gcs_fs)
total_tester = CacheTester(flow, tier=["local", "cloud"], gcs_fs=gcs_fs)
local_tester.expect_zero_entries()
cloud_tester.expect_zero_entries()
total_tester.expect_zero_entries()
flow.get("b")
local_tester.expect_new_entries("a", "b")
cloud_tester.expect_new_entries("a", "b")
total_tester.expect_new_entries("a", "a", "b", "b")
flow.get("c")
local_tester.expect_new_entries("c")
cloud_tester.expect_new_entries("c")
total_tester.expect_new_entries("c", "c")
(local_b_entry,) = [
entry
for entry in flow.cache.get_entries()
if entry.entity == "b" and entry.tier == "local"
]
local_b_entry.delete()
local_tester.expect_removed_entries("b")
cloud_tester.expect_same_entries()
total_tester.expect_removed_entries("b")
(cloud_c_entry,) = [
entry
for entry in flow.cache.get_entries()
if entry.entity == "c" and entry.tier == "cloud"
]
cloud_c_entry.delete()
local_tester.expect_same_entries()
cloud_tester.expect_removed_entries("c")
total_tester.expect_removed_entries("c")
|
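# Overview (inferred from the code below): the payoff G is a down-and-out put,
# zero outside [H, K], and each of the N time steps applies a Wiener-Hopf style
# factorization in Fourier space: multiply by phi_plus, re-impose the barrier
# with the indicator function, then multiply by phi_minus, all under
# Black-Scholes dynamics with beta_plus/beta_minus computed from gamma, sigma
# and q.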
from math import log, sqrt
from numpy import exp, linspace, fft, array, arange, pi
import matplotlib.pyplot as plt
i = complex(0, 1)
# model parameters
T = 1
H_original = 90.0 # limit
K_original = 100.0 # strike
r_premia = 10 # annual interest rate, in percent
r = log(r_premia/100 + 1)
V0 = 0.316227766
sigma = V0
gamma = r - 0.5 * sigma**2 # Black-Scholes parameter
# constants in log scale for price, by Zanette
M = 2**12 # number of points in price grid
L = 2 # scaling coefficient
x_min = L * log(0.5)
x_max = L * log(2.0)
d = (x_max - x_min) / M # discretization step
K = log(K_original/H_original)
H = log(H_original/H_original)
x_space = [] # prices array
for p_elem_index in range(M):
x_space.append(x_min + p_elem_index * d)
original_prices_array = H_original * exp(x_space)
# time discretization
N = 50 # number_of_time_steps
delta_t = T/N
q = 1.0/delta_t + r
factor = (q*delta_t)**(-1)
# beta_plus and beta_minus
beta_minus = - (gamma + sqrt(gamma**2 + 2*sigma**2 * q))/sigma**2
beta_plus = - (gamma - sqrt(gamma**2 + 2*sigma**2 * q))/sigma**2
print(beta_minus)
print(beta_plus)
def G(x):
"""payoff_function for a given option type (down-and-out put there)"""
if (H <= x) and (x <= K):
return K_original - H_original*exp(x)
else:
return 0
f_0 = array([G(x_element) for x_element in x_space]) # = g(x) i.e. payoff func
xi_space = fft.fftfreq(M, d=d)
# factor functions
phi_plus_array = array([beta_plus/(beta_plus - i*2*pi*xi) for xi in xi_space])
phi_minus_array = array([-beta_minus/(-beta_minus + i*2*pi*xi) for xi in xi_space])
def indicator(a):
"""the indicator influences the function argument, not value.
So here it iterates through x-domain and cuts any
values of function with an argument less than H"""
indicated = []
for index in range(len(x_space)):
if x_space[index] > H:
indicated.append(a[index])
else:
indicated.append(0)
return indicated
# main cycle (iterating over time)
f_n = array(f_0)
#plt.plot(original_prices_array, f_n)
for _ in range(N):  # backward induction over the N time steps
f_n_plus_1 = factor * fft.ifft(phi_minus_array *
fft.fft(indicator(fft.ifft(phi_plus_array * fft.fft(f_n)))))
f_n = f_n_plus_1
plt.plot(original_prices_array, f_n_plus_1)
plt.savefig("out.png")
plt.show()
# file output section
outfile = open('out.txt', 'w')
outfile.write("asset price; answer\n")
for elem in list(zip(original_prices_array, f_n_plus_1)):
outfile.write(str(elem[0]) + ',')
outfile.write(str(elem[1].real) + ',')
outfile.write(str(elem[1].imag) + ',')
outfile.write('\n')
# for elem in xi_space:
# outfile.write(str(elem))
# outfile.write('\n')
outfile.close()
|
"""Integration tests for the pyWriter project.
Test the conversion of the chapter descriptions.
For further information see https://github.com/peter88213/PyWriter
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
from pywriter.html.html_chapterdesc import HtmlChapterDesc
from pywriter.odt.odt_chapterdesc import OdtChapterDesc
from pywriter.test.import_export_test import ImportExportTest
import unittest
class NrmOpr(ImportExportTest, unittest.TestCase):
_importClass = HtmlChapterDesc
_exportClass = OdtChapterDesc
# The test methods must be defined here to identify the source of failure.
def test_yw7_to_exp(self):
super().test_yw7_to_exp()
def test_yw7_to_exp_ui(self):
super().test_yw7_to_exp_ui()
def test_imp_to_yw7(self):
super().test_imp_to_yw7()
def test_imp_to_yw7_ui(self):
super().test_imp_to_yw7_ui()
def test_data(self):
super().test_data()
def main():
unittest.main()
if __name__ == '__main__':
main()
|
from os.path import join
from jpype import JClass, JString, getDefaultJVMPath, isJVMStarted, shutdownJVM, startJVM
def Normalize(query):
ZEMBEREK_PATH: str = join('Zemberek','bin', 'zemberek-full.jar')
# Start the JVM only once; JPype cannot start (or restart) a JVM that is
# already running in this process.
if not isJVMStarted():
startJVM(
getDefaultJVMPath(),
'-ea',
f'-Djava.class.path={ZEMBEREK_PATH}',
convertStrings=False
)
TurkishMorphology: JClass = JClass('zemberek.morphology.TurkishMorphology')
TurkishSentenceNormalizer: JClass = JClass(
'zemberek.normalization.TurkishSentenceNormalizer'
)
Paths: JClass = JClass('java.nio.file.Paths')
normalizer = TurkishSentenceNormalizer(
TurkishMorphology.createWithDefaults(),
Paths.get(
join('Zemberek','ZemberekData', 'normalization')
),
Paths.get(
join('Zemberek','ZemberekData', 'lm', 'lm.2gram.slm')
)
)
norm = normalizer.normalize(JString(query))
print((
f'\nNoisy : {query}'
f'\nNormalized : {norm}\n'
))
# shutdownJVM() is deliberately not called here: JPype cannot restart the JVM
# in the same process, so shut it down only when all Zemberek work is done.
return norm
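# Illustrative call (the misspelled Turkish sentence is invented for the example):
#   Normalize("Yrn okua gidicem")
# prints the noisy and normalized forms and returns the normalized sentence.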
|
"""
Vertically partitioned SplitNN implementation
Worker 1 has two segments of the model and the Images
Worker 2 has a segment of the model and the Labels
"""
import torch
from torch import nn
import syft as sy
hook = sy.TorchHook(torch)
class SplitNN:
def __init__(self, models, optimizers):
self.models = models
self.optimizers = optimizers
self.data = []
self.remote_tensors = []
def forward(self, x):
# Work with the model segments stored on this instance.
models = self.models
data = []
remote_tensors = []
data.append(models[0](x))
if data[-1].location == models[1].location:
remote_tensors.append(data[-1].detach().requires_grad_())
else:
remote_tensors.append(
data[-1].detach().move(models[1].location).requires_grad_()
)
i = 1
while i < (len(models) - 1):
data.append(models[i](remote_tensors[-1]))
if data[-1].location == models[i + 1].location:
remote_tensors.append(data[-1].detach().requires_grad_())
else:
remote_tensors.append(
data[-1].detach().move(models[i + 1].location).requires_grad_()
)
i += 1
data.append(models[i](remote_tensors[-1]))
self.data = data
self.remote_tensors = remote_tensors
return data[-1]
def backward(self):
models = self.models
data = self.data
remote_tensors = self.remote_tensors
i = len(models) - 2
while i > -1:
if remote_tensors[i].location == data[i].location:
grads = remote_tensors[i].grad.copy()
else:
grads = remote_tensors[i].grad.copy().move(data[i].location)
data[i].backward(grads)
i -= 1
def zero_grads(self):
for opt in self.optimizers:
opt.zero_grad()
def step(self):
for opt in self.optimizers:
opt.step()
# Define our model segments
INPUT_SIZE = 784
hidden_sizes = [128, 640]
OUTPUT_SIZE = 10
models = [
nn.Sequential(
nn.Linear(INPUT_SIZE, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
),
nn.Sequential(nn.Linear(hidden_sizes[1], OUTPUT_SIZE), nn.LogSoftmax(dim=1)),
]
# # Send Model Segments to model locations
# model_locations = [worker1, worker2]
# for model, location in zip(models, model_locations):
# model.send(location)
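# Illustrative (and deliberately commented-out) training step. It assumes the
# segments above have already been sent to two PySyft workers and that
# `images` lives on the first worker while `labels` lives on the second; the
# optimizer choice below is an assumption, not part of the original script.
#
# optimizers = [torch.optim.SGD(model.parameters(), lr=0.03) for model in models]
# split_nn = SplitNN(models, optimizers)
# split_nn.zero_grads()
# pred = split_nn.forward(images)
# loss = nn.functional.nll_loss(pred, labels)
# loss.backward()
# split_nn.backward()
# split_nn.step()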
|
import sys
import json
import pprint
import os
from prism_building import Prism_Building
from present import Present
def main():
#box_perimeter_core("../cuboid-idf-test/box_perimeter_core_100_200_15.idf", 100, 200, 15, 5, True)
#L_shaped_perimeter_core("../cuboid-idf-test/L_perimeter_core_100_200_15.idf", 100, 200, 45, 45, 15, 5, True)
#box_simple("../cuboid-idf-test/box_simple_20_40.idf", 20, 40, 1, False)
#E_shaped("../cuboid-idf-test/E_building.idf", 250, 150, 40, 100, 20, 2, False)
#Wedge_shaped("../cuboid-idf-test/Wedge.idf", 70, 100, 15, 60, 40, 30, 2, True)
# some testing related to json files for osw manipulation
#json_osw_test(0.1)
#json_osw_test(0.2)
#json_osw_test(0.3)
present = Present()
#present.test_pptx()
present.test_pptx02()
present.test_pptx03()
#prism = Prism_Building()
#prism.test_object_order()
def box_perimeter_core(file_name, width, depth, zone_depth, number_above_ground_stories, has_basement):
# the form of the box with perimeter and core zones
#
# b---------c /\
# |\ /| |
# | f-----g | |
# | | | | depth
# | e-----h | |
# |/ \| |
# a---------d \/
#
# <--width-->
if (width - 2 * zone_depth < 1) or (depth - 2 * zone_depth < 1):
print("width {}, depth {} and zone_depth {} are not consistent.".format(width, depth, zone_depth))
prism = Prism_Building()
# exterior wall corners
prism.add_corner("a", 0, 0)
prism.add_corner("b", 0, depth)
prism.add_corner("c", width, depth)
prism.add_corner("d", width, 0)
# interior wall corners
prism.add_corner("e", zone_depth, zone_depth)
prism.add_corner("f", zone_depth, depth - zone_depth)
prism.add_corner("g", width - zone_depth, depth - zone_depth)
prism.add_corner("h", width - zone_depth, zone_depth)
# exterior walls
prism.add_exterior_wall("a", "b")
prism.add_exterior_wall("b", "c")
prism.add_exterior_wall("c", "d")
prism.add_exterior_wall("d", "a")
# interior walls
prism.add_interior_wall("e", "f")
prism.add_interior_wall("f", "g")
prism.add_interior_wall("g", "h")
prism.add_interior_wall("h", "e")
prism.add_interior_wall("a", "e")
prism.add_interior_wall("b", "f")
prism.add_interior_wall("c", "g")
prism.add_interior_wall("d", "h")
# plan zones
prism.add_plan_zone(["a", "b", "f", "e"])
prism.add_plan_zone(["b", "c", "g", "f"])
prism.add_plan_zone(["c", "d", "h", "g"])
prism.add_plan_zone(["d", "a", "e", "h"])
prism.add_plan_zone(["e", "f", "g", "h"])
prism.number_of_above_grade_stories = number_above_ground_stories
prism.has_basement = has_basement
prism.add_occupancy_types("default",10.76,"BLDG_LIGHT_SCH",10.76,"BLDG_EQUIP_SCH",18.58,"BLDG_OCC_SCH")
prism.add_hvac_types("default",'Furnace_DX',3.0,0.8)
prism.create_idf(file_name)
def L_shaped_perimeter_core(file_name, width, depth, end1, end2, zone_depth, number_above_ground_stories, has_basement):
# <----end1---->
#
# b------------c /\
# |\ /| |
# | g--------h | |
# | | | | |
# | | | | |
# | | | d---------e /\ depth
# | | |/ /| | |
# | | i---------j | | |
# | | / | | end2 |
# | l------------------k | | |
# |/ \| | |
# a----------------------f \/ \/
#
# <---------width-------->
if (width - end1 < 1) or (depth - end2 < 1):
print("width {}, depth {}, end1 {}, and end2 {} are not consistant".format(width, depth, end1, end2))
if (end1 - 2 * zone_depth < 1) or (end2 - 2 * zone_depth < 1):
print("end1 {}, end2 {} and zone_depth {} are not consistant".format(end1, end2, zone_depth))
prism = Prism_Building()
prism.add_corner("a", 0, 0)
prism.add_corner("b", 0, depth)
prism.add_corner("c", end1, depth)
prism.add_corner("d", end1, end2)
prism.add_corner("e", width, end2)
prism.add_corner("f", width, 0)
prism.add_corner("g", zone_depth, depth - zone_depth)
prism.add_corner("h", end1 - zone_depth, depth - zone_depth)
prism.add_corner("i", end1 - zone_depth, end2 - zone_depth)
prism.add_corner("j", width - zone_depth, end2 - zone_depth)
prism.add_corner("k", width - zone_depth, zone_depth)
prism.add_corner("l", zone_depth, zone_depth)
prism.add_exterior_wall("a", "b")
prism.add_exterior_wall("b", "c")
prism.add_exterior_wall("c", "d")
prism.add_exterior_wall("d", "e")
prism.add_exterior_wall("e", "f")
prism.add_exterior_wall("f", "a")
prism.add_interior_wall("g", "h")
prism.add_interior_wall("h", "i")
prism.add_interior_wall("i", "j")
prism.add_interior_wall("j", "k")
prism.add_interior_wall("k", "l")
prism.add_interior_wall("l", "g")
prism.add_interior_wall("a", "l")
prism.add_interior_wall("b", "g")
prism.add_interior_wall("c", "h")
prism.add_interior_wall("d", "i")
prism.add_interior_wall("e", "j")
prism.add_interior_wall("f", "k")
prism.add_interior_wall("i", "l") #subdivide the interior so convex
prism.add_plan_zone(["a", "b", "g", "l"])
prism.add_plan_zone(["b", "c", "h", "g"])
prism.add_plan_zone(["c", "d", "i", "h"])
prism.add_plan_zone(["d", "e", "j", "i"])
prism.add_plan_zone(["e", "f", "k", "j"])
prism.add_plan_zone(["l", "k", "f", "a"])
# interior zone is split in two so they are convex
prism.add_plan_zone(["l", "g", "h", "i"])
prism.add_plan_zone(["i", "j", "k", "l"])
prism.number_of_above_grade_stories = number_above_ground_stories
prism.has_basement = has_basement
prism.add_occupancy_types("default",10.76,"BLDG_LIGHT_SCH",10.76,"BLDG_EQUIP_SCH",18.58,"BLDG_OCC_SCH")
prism.add_hvac_types("default",'IdealLoadsAirSystem', 1, 1)
prism.create_idf(file_name)
def box_simple(file_name, width, depth, number_above_ground_stories, has_basement):
#
# b---------c /\
# | | |
# | | |
# | | depth
# | | |
# | | |
# a---------d \/
#
# <--width-->
#
prism = Prism_Building()
prism.add_corner("a", 0, 0)
prism.add_corner("b", 0, depth)
prism.add_corner("c", width, depth)
prism.add_corner("d", width, 0)
prism.add_exterior_wall("a", "b")
prism.add_exterior_wall("b", "c")
prism.add_exterior_wall("c", "d")
prism.add_exterior_wall("d", "a")
prism.add_plan_zone(["a", "b", "c", "d"])
prism.number_of_above_grade_stories = number_above_ground_stories
prism.has_basement = has_basement
prism.add_occupancy_types("default",10.76,"BLDG_LIGHT_SCH",10.76,"BLDG_EQUIP_SCH",18.58,"BLDG_OCC_SCH")
prism.add_hvac_types("default",'Furnace_DX',3.0,0.8)
prism.create_idf(file_name)
def E_shaped(file_name, width, depth, end, wing, last, number_above_ground_stories, has_basement):
# <--end--> <--end--> <--end-->
#
# a5--b5--c5 d5--e5--f5 g5--h5--i5 /\ /\ /\
# | | X | | | X | | | X | | | |
# | | | | | | | | | last | |
# a4--b4--c4 d4--e4--f4 g4--h4--i4 \/ | |
# | | | | | | | | | wing |
# | | | | | | | | | | |
# | | X | | | X | | | X | | |
# | | | | | | | | | | depth
# | | | | | | | | | | |
# a3--b3--c3------d3--e3--f3------g3--h3--i3 \/ |
# | | | | X | |
# | | | | | |
# a2------c2----------e2----------g2------i2 |
# | | | | X | |
# | | | | | |
# a1------c1----------e1----------g1------i1 \/
#
# <-------------width------------------->
#
prism = Prism_Building()
prism.add_corner("a1", 0, 0)
prism.add_corner("a2", 0, (depth - wing) / 2)
prism.add_corner("a3", 0, depth - wing)
prism.add_corner("a4", 0, depth - last)
prism.add_corner("a5", 0, depth)
prism.add_corner("b3", end / 2, depth - wing)
prism.add_corner("b4", end / 2, depth - last)
prism.add_corner("b5", end / 2, depth)
prism.add_corner("c1", end, 0)
prism.add_corner("c2", end, (depth - wing) / 2)
prism.add_corner("c3", end, depth - wing)
prism.add_corner("c4", end, depth - last)
prism.add_corner("c5", end, depth)
prism.add_corner("d3", width / 2 - end / 2, depth - wing)
prism.add_corner("d4", width / 2 - end / 2, depth - last)
prism.add_corner("d5", width / 2 - end / 2, depth)
prism.add_corner("e1", width / 2, 0)
prism.add_corner("e2", width / 2, (depth - wing) / 2)
prism.add_corner("e3", width / 2, depth - wing)
prism.add_corner("e4", width / 2, depth - last)
prism.add_corner("e5", width / 2, depth)
prism.add_corner("f3", width / 2 + end / 2, depth - wing)
prism.add_corner("f4", width / 2 + end / 2, depth - last)
prism.add_corner("f5", width / 2 + end / 2, depth)
prism.add_corner("g1", width - end, 0)
prism.add_corner("g2", width - end, (depth - wing) / 2)
prism.add_corner("g3", width - end, depth - wing)
prism.add_corner("g4", width - end, depth - last)
prism.add_corner("g5", width - end, depth)
prism.add_corner("h3", width - end / 2, depth - wing)
prism.add_corner("h4", width - end / 2, depth - last)
prism.add_corner("h5", width - end / 2, depth)
prism.add_corner("i1", width, 0)
prism.add_corner("i2", width, (depth - wing) / 2)
prism.add_corner("i3", width, depth - wing)
prism.add_corner("i4", width, depth - last)
prism.add_corner("i5", width, depth)
prism.add_exterior_wall("a1", "a2")
prism.add_exterior_wall("a2", "a3")
prism.add_exterior_wall("a3", "a4")
prism.add_exterior_wall("a4", "a5")
prism.add_exterior_wall("a5", "b5")
prism.add_exterior_wall("b5", "c5")
prism.add_exterior_wall("c5", "c4")
prism.add_exterior_wall("c4", "c3")
prism.add_exterior_wall("c3", "d3")
prism.add_exterior_wall("d3", "d4")
prism.add_exterior_wall("d4", "d5")
prism.add_exterior_wall("d5", "e5")
prism.add_exterior_wall("e5", "f5")
prism.add_exterior_wall("f5", "f4")
prism.add_exterior_wall("f4", "f3")
prism.add_exterior_wall("f3", "g3")
prism.add_exterior_wall("g3", "g4")
prism.add_exterior_wall("g4", "g5")
prism.add_exterior_wall("g5", "h5")
prism.add_exterior_wall("h5", "i5")
prism.add_exterior_wall("i5", "i4")
prism.add_exterior_wall("i4", "i3")
prism.add_exterior_wall("i3", "i2")
prism.add_exterior_wall("i2", "i1")
prism.add_exterior_wall("i1", "g1")
prism.add_exterior_wall("g1", "e1")
prism.add_exterior_wall("e1", "c1")
prism.add_exterior_wall("c1", "a1")
prism.add_interior_wall("a2", "c2")
prism.add_interior_wall("c2", "e2")
prism.add_interior_wall("e2", "g2")
prism.add_interior_wall("g2", "i2")
prism.add_interior_wall("a3", "b3")
prism.add_interior_wall("b3", "c3")
prism.add_interior_wall("d3", "e3")
prism.add_interior_wall("e3", "f3")
prism.add_interior_wall("g3", "h3")
prism.add_interior_wall("h3", "i3")
prism.add_interior_wall("a4", "b4")
prism.add_interior_wall("b4", "c4")
prism.add_interior_wall("d4", "e4")
prism.add_interior_wall("e4", "f4")
prism.add_interior_wall("g4", "h4")
prism.add_interior_wall("h4", "i4")
prism.add_interior_wall("b3", "b4")
prism.add_interior_wall("b4", "b5")
prism.add_interior_wall("c1", "c2")
prism.add_interior_wall("c2", "c3")
prism.add_interior_wall("e1", "e2")
prism.add_interior_wall("e2", "e3")
prism.add_interior_wall("e3", "e4")
prism.add_interior_wall("e4", "e5")
prism.add_interior_wall("g1", "g2")
prism.add_interior_wall("g2", "g3")
prism.add_interior_wall("h3", "h4")
prism.add_interior_wall("h4", "h5")
prism.add_plan_zone(["a1", "a2", "c2", "c1"])
prism.add_plan_zone(["c1", "c2", "e2", "e1"])
prism.add_plan_zone(["e1", "e2", "g2", "g1"])
prism.add_plan_zone(["g1", "g2", "i2", "i1"])
prism.add_plan_zone(["a2", "a3", "b3", "c3", "c2"])
prism.add_plan_zone(["c2", "c3", "d3", "e3", "e2"])
prism.add_plan_zone(["e2", "e3", "f3", "g3", "g2"])
prism.add_plan_zone(["g2", "g3", "h3", "i3", "i2"])
prism.add_plan_zone(["a3", "a4", "b4", "b3"])
prism.add_plan_zone(["b3", "b4", "c4", "c3"])
prism.add_plan_zone(["d3", "d4", "e4", "e3"])
prism.add_plan_zone(["e3", "e4", "f4", "f3"])
prism.add_plan_zone(["g3", "g4", "h4", "h3"])
prism.add_plan_zone(["h3", "h4", "i4", "i3"])
prism.add_plan_zone(["a4", "a5", "b5", "b4"])
prism.add_plan_zone(["b4", "b5", "c5", "c4"])
prism.add_plan_zone(["d4", "d5", "e5", "e4"])
prism.add_plan_zone(["e4", "e5", "f5", "f4"])
prism.add_plan_zone(["g4", "g5", "h5", "h4"])
prism.add_plan_zone(["h4", "h5", "i5", "i4"])
prism.number_of_above_grade_stories = number_above_ground_stories
prism.has_basement = has_basement
prism.add_occupancy_types("default",10.76,"BLDG_LIGHT_SCH",10.76,"BLDG_EQUIP_SCH",18.58,"BLDG_OCC_SCH")
# prism.add_hvac_types("default",'Furnace_DX',3.0,0.8)
prism.add_hvac_types("default",'IdealLoadsAirSystem',1,1)
prism.create_idf(file_name)
def Wedge_shaped(file_name, width, depth, zone_depth, lfdep, rtdep, offset, number_above_ground_stories, has_basement):
#
# <offset>
# c /\
# /|\ |
# / | \ |
# / | \ |
# / h \ |
# / / \ \ |
# /\ b / \ \ depth
# | |\ / \ \ |
# | | g \ d /\ |
# | | | i/ | | |
# lfdep| | | | rtdep |
# | | | | | | |
# | | f--------j | | |
# | |/ \ | | |
# \/ a-------------e \/ \/
#
# <----width---->
#
prism = Prism_Building()
prism.add_corner("a", 0, 0)
prism.add_corner("b", 0, lfdep)
prism.add_corner("c", offset, depth)
prism.add_corner("d", width, rtdep)
prism.add_corner("e", width, 0)
prism.add_corner("f", zone_depth, zone_depth)
prism.add_corner("g", zone_depth, lfdep - zone_depth / 2)
prism.add_corner("h", offset, depth - zone_depth)
prism.add_corner("i", width - zone_depth, rtdep - zone_depth / 2)
prism.add_corner("j", width - zone_depth, zone_depth)
prism.add_exterior_wall("a", "b")
prism.add_exterior_wall("b", "c")
prism.add_exterior_wall("c", "d")
prism.add_exterior_wall("d", "e")
prism.add_exterior_wall("e", "a")
prism.add_interior_wall("f", "g")
prism.add_interior_wall("g", "h")
prism.add_interior_wall("h", "i")
prism.add_interior_wall("i", "j")
prism.add_interior_wall("j", "f")
prism.add_interior_wall("a", "f")
prism.add_interior_wall("b", "g")
prism.add_interior_wall("c", "h")
prism.add_interior_wall("d", "i")
prism.add_interior_wall("e", "j")
prism.add_plan_zone(["a", "b", "g", "f"])
prism.add_plan_zone(["b", "c", "h", "g"])
prism.add_plan_zone(["c", "d", "i", "h"])
prism.add_plan_zone(["d", "e", "j", "i"])
prism.add_plan_zone(["j", "e", "a", "f"])
prism.add_plan_zone(["f", "g", "h", "i", "j"])
prism.number_of_above_grade_stories = number_above_ground_stories
prism.has_basement = has_basement
prism.add_occupancy_types("default",10.76,"BLDG_LIGHT_SCH",10.76,"BLDG_EQUIP_SCH",18.58,"BLDG_OCC_SCH")
prism.add_hvac_types("default",'Furnace_DX',3.0,0.8)
prism.create_idf(file_name)
def json_osw_test(wwr):
pp = pprint.PrettyPrinter(indent=4)
with open('D:/projects/SBIR SimArchImag/5 SimpleBox/os-test/emptyCLI/workflow-min.osw') as f:
data = json.load(f)
#print(data)
#pp.pprint(data)
steps = data['steps']
for step in steps:
if step['measure_dir_name'] == "AddRemoveOrReplaceWindowsCopy":
#print(step)
arguments = step['arguments']
#print(arguments['wwr'])
arguments['wwr'] = wwr
try:
os.mkdir('D:/projects/SBIR SimArchImag/5 SimpleBox/os-test/emptyCLI/{}'.format(wwr))
except FileExistsError:
print('directory already exists')
with open('D:/projects/SBIR SimArchImag/5 SimpleBox/os-test/emptyCLI/{}/workflow-min-{}.osw'.format(wwr, wwr),'w') as o:
json.dump(data, o, indent=3, sort_keys=True)
if __name__ == '__main__':
sys.exit(main())
|
# Generated by Django 3.1.3 on 2021-01-01 18:33
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0036_auto_20201231_1205'),
]
operations = [
migrations.AlterField(
model_name='mentor',
name='mentorship_duration',
field=models.IntegerField(default=6, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(24)]),
),
]
|
# Download the Python helper library from twilio.com/docs/libraries/python
from datetime import date
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/console
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = TwilioRestClient(account_sid, auth_token)
# A list of message objects with filtering
messages = client.messages.list(
to='+15558675310', from_='+15017122661', date_sent=date(2016, 8, 31)
)
|
import pytest
from gaphor import UML
from gaphor.core.modeling import ElementFactory
from gaphor.plugins.xmiexport.exportmodel import XMIExport
@pytest.fixture
def element_factory():
ef = ElementFactory()
ef.create(UML.Package).name = "package"
c1 = ef.create(UML.Class)
c1.name = "class"
c2 = ef.create(UML.Class)
c2.name = "class"
i1 = ef.create(UML.Interface)
i1.name = "interface"
c1.ownedAttribute = ef.create(UML.Property)
c1.ownedAttribute[0].name = "attr"
c1.ownedOperation = ef.create(UML.Operation)
c1.ownedOperation[0].name = "oper"
c1.ownedOperation[0].ownedParameter = ef.create(UML.Parameter)
UML.recipes.create_dependency(c1, c2)
UML.recipes.create_generalization(c1, c2)
UML.recipes.create_association(c1, c2)
return ef
def test_xmi_export(element_factory, tmp_path):
exporter = XMIExport(element_factory)
f = tmp_path / "test.gaphor"
exporter.export(f)
content = f.read_text()
assert '<XMI xmi.version="2.1"' in content
|
import logging
import re
import shutil
from pathlib import Path
import pdfplumber
import requests
from bs4 import BeautifulSoup
from openpyxl import load_workbook
from .. import utils
from ..cache import Cache
__authors__ = ["zstumgoren", "Dilcia19", "ydoc5212"]
__tags__ = ["html", "pdf", "excel"]
logger = logging.getLogger(__name__)
def scrape(
data_dir: Path = utils.WARN_DATA_DIR,
cache_dir: Path = utils.WARN_CACHE_DIR,
) -> Path:
"""
Scrape data from California.
Compiles a single CSV for CA using historical PDFs and an Excel file for the current fiscal year.
Only regenerates the CSV if a PDF or the Excel file has changed.
Keyword arguments:
data_dir -- the Path where the result will be saved (default WARN_DATA_DIR)
cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)
Returns: the Path where the file is written
"""
output_csv = data_dir / "ca.csv"
# Set up cache dir for state
cache_state = Path(cache_dir, "ca")
cache_state.mkdir(parents=True, exist_ok=True)
# Initially write to a temp file in cache_dir before
# over-writing prior output_csv, so we can use append
# mode while avoiding data corruption if script errors out
temp_csv = cache_state / "ca_temp.csv"
# Create Cache instance for downstream operations
cache = Cache(cache_dir)
# Update pdfs and Excel files
files_have_changed = _update_files(cache)
output_headers = [
"notice_date",
"effective_date",
"received_date",
"company",
"city",
"num_employees",
"layoff_or_closure",
"county",
"address",
"source_file",
]
if files_have_changed:
logger.info("One or more source data files have changed")
logger.info("Extracting Excel data for current fiscal year")
wb_path = cache.files(subdir="ca", glob_pattern="*.xlsx")[0]
excel_data = _extract_excel_data(wb_path)
# Write mode when processing Excel
utils.write_dict_rows_to_csv(
temp_csv, output_headers, excel_data, mode="w", extrasaction="ignore"
)
logger.info("Extracting PDF data for prior years")
for pdf in cache.files(subdir="ca", glob_pattern="*.pdf"):
logger.info(f"Extracting data from {pdf}")
data = _extract_pdf_data(pdf)
# Append mode when processing PDFs
utils.write_dict_rows_to_csv(
temp_csv, output_headers, data, mode="a", extrasaction="ignore"
)
# If all went well, copy temp csv over pre-existing output csv
utils.create_directory(output_csv, is_file=True)
shutil.copy2(temp_csv, output_csv)
return output_csv
def _update_files(cache):
files_have_changed = False
# Create lookup of pre-existing PDF and Excel files and their sizes
files = {}
for local_file in cache.files(subdir="ca/"):
fname = local_file.split("/")[-1]
extension = fname.split(".")[-1]
if extension in ["pdf", "xlsx"]:
files[fname] = Path(local_file).stat().st_size
# Download the file if it has changed or is not present.
links = _get_file_links()
for link in links:
file_name = link["url"].split("/")[-1]
download_status = False
# If file doesn't exist, update download status
try:
local_size = files[file_name]
except KeyError:
download_status = True
local_size = None
# If size doesn't match, update download status
if local_size != link["size"]:
download_status = True
if download_status is True:
files_have_changed = True
cache.download(f"ca/{file_name}", link["url"])
# Delete local files whose names don't match
# data files on remote site, in order to guard against
# duplicates if the source agency renames files
for obsolete_file in _obsolete_local_files(files, links):
files_have_changed = True
logger.info(
f"Deleting local file no longer present on source site: {obsolete_file}"
)
Path(cache.path, f"ca/{obsolete_file}").unlink()
return files_have_changed
def _get_file_links():
"""Get links to historical PDFs and the Excel file."""
logger.info("Getting metadata for data files")
base_url = "https://edd.ca.gov/Jobs_and_Training"
home_page = f"{base_url}/Layoff_Services_WARN.htm"
html = utils.get_url(home_page).text
soup = BeautifulSoup(html, "html.parser")
links = []
for link in soup.find_all("a"):
relative_file_url = link.attrs.get("href", "").strip()
if _is_warn_report_link(relative_file_url):
file_url = f"{base_url}/{relative_file_url}"
meta = _get_file_metadata(file_url)
links.append(meta)
return links
def _is_warn_report_link(url):
return True if re.search(r"warn[-_]?report", url, re.I) else False
def _get_file_metadata(url):
return {"url": url, "size": int(requests.head(url).headers["Content-Length"])}
def _extract_excel_data(wb_path):
wb = load_workbook(filename=wb_path)
# Get the only worksheet
ws = wb.worksheets[0]
rows = [row for row in ws.rows]
# Throw away initial rows until we reach first data row
while True:
row = rows.pop(0)
first_cell = row[0].value.strip().lower()
if first_cell.startswith("county"):
break
payload = []
for row in rows:
first_cell = row[0].value.strip().lower()
# Exit if we've reached summary row at bottom
if first_cell == "report summary":
break
# Spreadsheet contains merged cells so index
# positions below are not sequential
data = {
"county": row[0].value.strip(),
"notice_date": _convert_date(row[1].value),
"received_date": _convert_date(row[2].value),
"effective_date": _convert_date(row[4].value),
"company": row[5].value.strip(),
"layoff_or_closure": row[8].value.strip(),
"num_employees": row[10].value,
"address": row[12].value.strip(),
"source_file": wb_path.split("/")[-1],
}
payload.append(data)
return payload
def _convert_date(dt):
return dt.strftime("%m/%d/%Y")
def _extract_pdf_data(pdf_path):
headers = [
"notice_date",
"effective_date",
"received_date",
"company",
"city",
"county",
"num_employees",
"layoff_or_closure",
"source_file",
]
data = []
with pdfplumber.open(pdf_path) as pdf:
for idx, page in enumerate(pdf.pages):
# All pages except the last should have a single table
# Last page has an extra summary table, but indexing
# for the first should avoid grabbing the summary data
rows = page.extract_tables()[0]
# Remove header row on first page
# and update the standardized "headers" var if the source
# data has no county field, as in the case of
# files covering 07/2016-to-06/2017 fiscal year and earlier
if idx == 0:
raw_header = rows.pop(0)
raw_header_str = "-".join([col.strip().lower() for col in raw_header])
if "county" not in raw_header_str:
headers.remove("county")
# Skip if it's a summary table (this happens
# when the summary is the only table on the page, as in 7/2019-6/2020)
first_cell = rows[0][0].strip().lower()
if "summary" in first_cell:
continue
for row in rows:
data_row = dict(zip(headers, row))
# Data clean-ups
data_row.update(
{
"effective_date": data_row["effective_date"].replace(" ", ""),
"received_date": data_row["received_date"].replace(" ", ""),
"source_file": pdf_path.split("/")[-1],
}
)
data.append(data_row)
return data
def _obsolete_local_files(pdfs, links):
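"""Return the names of cached files that no longer appear among the remote links."""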
pdfs_uniq = set(pdfs.keys())
remote_files = {link["url"].split("/")[-1] for link in links}
return pdfs_uniq - remote_files
if __name__ == "__main__":
scrape()
|
#import nbformat
from nbparameterise import (extract_parameters, replace_definitions, parameter_values)
import notebooktoall.transform as trans
#with open("masso.ipynb") as f:
# nb = nbformat.read(f,as_version=4)
#orig_parameters = extract_parameters(nb)
#params = parameter_values(orig_parameters, filename = './dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv')
#new_nb = replace_definitions(nb, params)
#with open("masso2.ipynb","w") as f:
# nbformat.write(new_nb,f)
trans.transform_notebook(".\\analysis_v0.2.ipynb",export_list=['html'])
#nodeof = trans.get_notebook('.\masso2.ipynb')
#trans.write_files(['html'],nodeof,'..\analysis_v0.2.html')
|
# Generated by Django 3.2 on 2022-02-01 07:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0004_remove_goods_design_image_path'),
]
operations = [
migrations.RemoveField(
model_name='goods_info',
name='image_path',
),
migrations.RemoveField(
model_name='goods_result',
name='result_path',
),
migrations.AddField(
model_name='goods_info',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='upload_image'),
),
migrations.AddField(
model_name='goods_result',
name='description',
field=models.CharField(blank=True, db_column='description', max_length=100),
),
migrations.AddField(
model_name='goods_result',
name='design_code',
field=models.IntegerField(blank=True, db_column='design_code', null=True, unique=True),
),
migrations.AddField(
model_name='goods_result',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='result'),
),
migrations.AddField(
model_name='goods_result',
name='title',
field=models.CharField(blank=True, db_column='title', max_length=100),
),
migrations.AlterField(
model_name='goods_design',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='ai_designs'),
),
migrations.CreateModel(
name='profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone_no', models.CharField(db_column='phone_no', default='', max_length=100, null=True)),
('name', models.CharField(blank=True, db_column='name', max_length=100, null=True)),
('user_type', models.CharField(blank=True, db_column='user_type', max_length=100, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'profile',
},
),
]
|
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
FIELDS = ['first_name', 'last_name', 'email', 'username', ]
class RegistrationForm(UserCreationForm):
"""
Customise the User Registration form from default Django UserCreationForm
"""
class Meta:
model = get_user_model()
fields = FIELDS
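# Minimal usage sketch (view-side code assumed; `request` is not part of this module):
#   form = RegistrationForm(request.POST)
#   if form.is_valid():
#       form.save()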
|
import asyncio
from process_pixel import get_global_opacity, process_pixel, set_global_opacity
def off(strip):
for i in range(strip.numPixels()):
strip.setPixelColor(i, process_pixel(0))
strip.show()
async def off_animated(strip):
opacity = get_global_opacity()
for i in range(10, 0, -1):
set_global_opacity(opacity / 10 * i)
await asyncio.sleep(0.05)
off(strip)
set_global_opacity(opacity)
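# Usage sketch (assumes an initialised LED strip object, e.g. from the rpi_ws281x library):
#   asyncio.run(off_animated(strip))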
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
class PresubmitApi(recipe_api.RecipeApi):
@property
def presubmit_support_path(self):
return self.package_repo_resource('presubmit_support.py')
def __call__(self, *args, **kwargs):
"""Return a presubmit step."""
name = kwargs.pop('name', 'presubmit')
env = self.m.step.get_from_context('env', {})
env.setdefault('PATH', '%(PATH)s')
env['PATH'] = self.m.path.pathsep.join([
env['PATH'], str(self._module.PACKAGE_REPO_ROOT)])
with self.m.step.context({'env': env}):
return self.m.python(
name, self.presubmit_support_path, list(args), **kwargs)
|
from __future__ import absolute_import, division, print_function
import sys
import h5py
import numpy as np
from scitbx import matrix
from scitbx.array_family import flex
from dxtbx.format.Format import Format
from dxtbx.format.FormatHDF5 import FormatHDF5
from dxtbx.format.FormatStill import FormatStill
class FormatHDF5Sacla(FormatHDF5, FormatStill):
"""
Class for reading SACLA images created by the DataConvert SACLA
script (this script lives on the SACLA HPC).
This assumes the argument -reconstr was passed to
DataConvert in order to reconstruct the image.
Also, this processes only a single run's worth of data in the HDF5 file.
"""
@staticmethod
def understand(image_file):
with h5py.File(image_file, "r") as h5_handle:
understood = False
if "file_info" in h5_handle and "run_number_list" in h5_handle["file_info"]:
run_grp = FormatHDF5Sacla._get_run_h5group(h5_handle)
if "detector_2d_assembled_1" in list(run_grp.keys()):
understood = True
return understood
def _start(self):
self._h5_handle = h5py.File(self.get_image_file(), "r")
self._run = FormatHDF5Sacla._get_run_h5group(self._h5_handle)
event_info = self._run["event_info"]
tag_number_list = event_info["tag_number_list"]
self._images = ["tag_%d" % tag for tag in tag_number_list]
@staticmethod
def _get_run_h5group(h5_handle):
"""returns the first run group found"""
file_info = h5_handle["file_info"]
run_number_list = file_info["run_number_list"]
run_str = "run_%d" % run_number_list[0]
return h5_handle[run_str]
def _detector(self, index=None):
# Get the pixel and image size
detector_2d_assembled_1 = self._run["detector_2d_assembled_1"]
detector_info = detector_2d_assembled_1["detector_info"]
pixel_size = (
detector_info["pixel_size_in_micro_meter"][0] / 1000,
detector_info["pixel_size_in_micro_meter"][1] / 1000,
)
tag = detector_2d_assembled_1[self._images[0]]
data = tag["detector_data"][()]
# image_size is (fast, slow), in case the dataset is ever non-square
image_size = (data.shape[1], data.shape[0])
trusted_range = (0, 200000)
# Initialise detector frame
fast = matrix.col((1.0, 0.0, 0.0))
slow = matrix.col((0.0, -1.0, 0.0))
orig = matrix.col(
(
-image_size[0] * pixel_size[0] / 2,
image_size[1] * pixel_size[1] / 2,
-100.0,
)
)
# Make the detector
return self._detector_factory.make_detector(
"", fast, slow, orig, pixel_size, image_size, trusted_range
)
def _beam(self, index=None):
run_info = self._run["run_info"]
sacla_config = run_info["sacla_config"]
eV = sacla_config["photon_energy_in_eV"].value
return self._beam_factory.simple(12398.4 / eV)
def get_num_images(self):
return len(self._images)
def get_raw_data(self, index=0):
detector_2d_assembled_1 = self._run["detector_2d_assembled_1"]
tag = detector_2d_assembled_1[self._images[index]]
return flex.double(tag["detector_data"].value.astype(np.float64))
def get_detectorbase(self, index=None):
raise NotImplementedError
def get_image_file(self, index=None):
return Format.get_image_file(self)
def get_detector(self, index=None):
return self._detector_instance
def get_beam(self, index=None):
return self._beam_instance
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(FormatHDF5Sacla.understand(arg))
|
class Solution:
def winnerOfGame(self, colors: str) -> bool:
if len(colors) < 3:
return False
Alice = 0
Bob = 0
for i in range(1, len(colors)-1):
if colors[i-1] == colors[i] and colors[i] == colors[i+1]:
if colors[i] == 'A':
Alice += 1
else:
Bob += 1
if Alice > Bob:
return True
else:
return False
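# Quick sanity check (illustrative examples, not from the original):
#   Solution().winnerOfGame("AAABABB")  # True: Alice has one removable 'A', Bob has none
#   Solution().winnerOfGame("AABBB")    # False: Bob has one removable 'B', Alice has none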
|
import json
from pathlib import Path
import pygame
from tkinter import filedialog
import objects as object_types
from settings import SCREEN_DIMS, WINDOW_CONST
class roomObject():
def __init__(self,
color = (0, 0, 255, 1),
rect = None, # or (x, y, w, h)
circle = None, # or (x, y, r)
outline = 0, # is width for circles
text = None,
objType = "room",
footprint = [0, 0, 1, 1],
textColor = (255, 255, 255),
textSize = 16):
self.color = color
self.rect = rect
self.circle = circle
self.outline = outline
self.font = pygame.font.Font("fonts/Roboto-Regular.ttf", textSize)
self.textColor = textColor
self.text = text
self.objType = objType
self.footprint = footprint
if text is not None:
self.text = bytes(str(text), "utf-8")
if rect is None and circle is None:
self.rect = [0, 0, 0, 0]
def draw(self, surface):
if self.rect is None:
x = self.circle[0]
y = self.circle[1]
r = self.circle[2]
pygame.draw.circle(surface, self.color, (x, y), r, self.outline)
else:
x = self.rect[0]
y = self.rect[1]
pygame.draw.rect(surface, self.color, self.rect, self.outline)
if self.text is not None:
text_surface = self.font.render(self.text, 1, self.textColor)
surface.blit(text_surface, (x, y))
def getAttributes(self):
text = ""
if self.text is not None:
text = self.text.decode("utf-8")
attributes = {
"color": self.color,
"rect": self.rect,
"circle": self.circle,
"outline": self.outline,
"textColor": self.textColor,
"text": text,
"objType": self.objType,
"footprint": self.footprint
}
return attributes
def getPos(self):
return self.footprint[0:2]
def setPos(self, x, y, xs, ys, dims):
if xs + self.footprint[2] > dims[0] or ys + self.footprint[3] > dims[1]:
return False
if self.rect is None:
self.circle = [x, y, self.circle[2]]
self.footprint = [xs, ys] + self.footprint[2:]
else:
self.rect = [x, y] + self.rect[2:]
self.footprint = [xs, ys] + self.footprint[2:]
def setText(self, text):
if text is not None:
self.text = bytes(str(text), "utf-8")
class gridSpace():
def __init__(self,
color = (200, 200, 200, 1),
highlightColor = (210, 210, 210, 1),
rect = [0, 0, 50, 50],
outline = 1,
objectsHere = None,
highlighted = False,
text = None,
textSize = 16):
self.color = color
self.defaultColor = color
self.highlightColor = highlightColor
self.rect = rect
self.outline = outline
self.defaultOutline = outline
self.objectsHere = objectsHere
if objectsHere is None:
self.objectsHere = set()
self.highlighted = highlighted
self.font = pygame.font.Font("fonts/Roboto-Regular.ttf", textSize)
self.text = text
if text is not None:
self.text = bytes(str(text), "utf-8")
def addObject(self, obj):
self.objectsHere.add(obj)
def draw(self, surface):
if self.text is not None:
text_surface = self.font.render(self.text, 1, (0, 0, 0))
surface.blit(text_surface, self.rect)
pygame.draw.rect(surface, self.color, self.rect, self.outline)
def getCenter(self):
return [self.rect[0] + self.rect[2] // 2, self.rect[1] + self.rect[3] // 2]
def highlight(self, color = None):
if color is None:
color = self.highlightColor
self.setColor(color)
self.setOutline(0)
self.highlighted = True
def getObjects(self):
return self.objectsHere
def removeObject(self, obj):
self.objectsHere.remove(obj)
def setColor(self, color):
self.color = color
def setRect(self, rect):
self.rect = rect
def setOutline(self, outline):
self.outline = outline
def toggleHighlight(self, color = None):
if self.highlighted:
self.unhighlight()
else:
self.highlight(color = color)
def unhighlight(self):
self.setColor(self.defaultColor)
self.setOutline(self.defaultOutline)
self.highlighted = False
class grid():
def __init__(self,
width = 5,
height = 5,
totalWidth = 500,
totalHeight = 500,
numbers = False,
color = (200, 200, 200, 1),
file_name = "__RENAME__",
title = "New room",
gridTextSize = 16,
textSize = 16):
self.file_name = file_name
self.title = title
self.dims = [width, height]
self.totalDims = [totalWidth, totalHeight]
self.spaceDims = [totalWidth // width, totalHeight // height]
self.hoverSpace = [-1, -1]
self.lockedSpace = [-1, -1]
self.waitFunction = []
self.dead = False
self.gridTextSize = gridTextSize
self.textSize = textSize
self.objects = set()
self.createGridSpaces(width, height)
def addObject(self, obj):
self.objects.add(obj)
for w in range(obj.footprint[2]):
for h in range(obj.footprint[3]):
if len(self.gridSpaces) > obj.footprint[0] + w:
if len(self.gridSpaces[obj.footprint[0] + w]) > obj.footprint[1] + h:
self.gridSpaces[obj.footprint[0] + w][obj.footprint[1] + h].addObject(obj)
def createGridSpaces(self, width, height):
self.gridSpaces = list()
for w in range(width):
self.gridSpaces.append(list())
for h in range(height):
text = None
if h == 0:
text = w + 1
elif w == 0:
text = h + 1
self.gridSpaces[w].append(
gridSpace(rect=[w * self.spaceDims[0],
h * self.spaceDims[1],
self.spaceDims[0],
self.spaceDims[1]],
text = text,
textSize = self.gridTextSize))
def draw(self, surface):
# Draw grid spaces
for w in range(self.dims[0]):
for h in range(self.dims[1]):
self.gridSpaces[w][h].draw(surface)
# Draw objects
for obj in self.objects:
obj.draw(surface)
def export(self, path=None):
"""Export this project to JPG or PNG"""
save = self.getSaveData()
roomGrid = grid(self.dims[0], self.dims[1], 2048, 2048, True, gridTextSize=32, textSize=48)
roomGrid.setData(save)
surface = pygame.Surface((2048, 2048))
surface.fill((255, 255, 255))
roomGrid.draw(surface)
if path is None:
# Have to make resizable so file dialog appears
if WINDOW_CONST == pygame.FULLSCREEN:
pygame.display.set_mode(SCREEN_DIMS, pygame.RESIZABLE)
path = filedialog.asksaveasfilename(title="Choose a file location and name.", filetypes=[("JPEG", ".jpg"), ("PNG", ".png")], defaultextension=".jpg")
if WINDOW_CONST == pygame.FULLSCREEN:
pygame.display.set_mode(SCREEN_DIMS, pygame.FULLSCREEN)
try:
pygame.image.save(surface, path)
return True
except Exception:
return False
def getCoords(self, spaceCoords, center=False):
"""Returns the rectangle of a space, or the x, y center coords, based on space coordinates"""
space = self.gridSpaces[spaceCoords[0]][spaceCoords[1]]
if center:
return space.getCenter()
else:
return space.rect
def getSaveData(self):
"""Returns the save data of this project"""
save = {
"meta": {
"title": self.title,
"dimensions": self.dims
},
"objects": self.saveObjects()
}
return save
def getSmallestAt(self, location, objType):
"""Returns the smallest item at a given gridspace"""
if location[0] < 0 or location[1] < 0:
return False
smallestObj = None
for obj in self.gridSpaces[location[0]][location[1]].getObjects():
if obj.objType == objType or objType == "any":
# Compare areas - smaller area should get picked
if smallestObj is None \
or (obj.footprint[2] * obj.footprint[3] < smallestObj.footprint[2] * smallestObj.footprint[3]):
smallestObj = obj
if smallestObj is None:
return False
else:
return smallestObj
def getSpace(self, cursorCoords):
"""Returns the space coordinates w,h based on cursor coordinates."""
space = [0, 0]
# X dimension
if cursorCoords[0] >= self.spaceDims[0] * self.dims[0]:
space[0] = self.dims[0] - 1
elif cursorCoords[0] > 0:
space[0] = cursorCoords[0] // self.spaceDims[0]
# Y dimension
if cursorCoords[1] >= self.spaceDims[1] * self.dims[1]:
space[1] = self.dims[1] - 1
elif cursorCoords[1] > 0:
space[1] = cursorCoords[1] // self.spaceDims[1]
return space
def highlight(self, spaceCoords, exclusive = False, color = (210, 210, 210, 1)):
"""Highlights a space"""
if exclusive:
for w in range(self.dims[0]):
for h in range(self.dims[1]):
self.gridSpaces[w][h].unhighlight()
self.gridSpaces[spaceCoords[0]][spaceCoords[1]].highlight(color)
self.hoverSpace = list(spaceCoords)
def lockSpace(self):
"""Locks a space while waiting for text to process"""
self.lockedSpace = self.hoverSpace
def moveObject(self, objType, location, to_location):
"""Moves the first instance of object with objType that exists at location"""
obj = self.getSmallestAt(location, objType)
if obj != False and (len(to_location) == 3 or (to_location[0] > -1 and to_location[1] > -1)) \
and to_location[0] < self.dims[0] and to_location[1] < self.dims[1]:
if len(to_location) == 3 and to_location[2] == "relative":
orig = obj.getPos()
to_location = [orig[0] + to_location[0], orig[1] + to_location[1]]
if to_location[0] < 0 or to_location[1] < 0 or to_location[0] >= self.dims[0] or to_location[1] >= self.dims[1]:
return False
self.removeObject(objType, location)
# If circle, use get_coords
if obj.circle is not None:
coords = self.getCoords(to_location, True)
obj.setPos(coords[0], coords[1], to_location[0], to_location[1], self.dims)
elif obj.rect is not None:
obj.setPos(to_location[0] * self.spaceDims[0], to_location[1] * self.spaceDims[1], to_location[0], to_location[1], self.dims)
self.addObject(obj)
return True
else:
return False
def openFile(self, path):
"""Opens a saved room design. Returns file's name."""
self.file_name = path
with open(path) as f:
data = json.load(f)
self.setData(data)
def removeObject(self, objType, location):
"""Removes the first instance of object with objType that exists at location"""
obj = self.getSmallestAt(location, objType)
if obj != False:
for w in range(obj.footprint[2]):
for h in range(obj.footprint[3]):
self.gridSpaces[obj.footprint[0] + w][obj.footprint[1] + h].removeObject(obj)
self.objects.remove(obj)
return True
else:
return False
def renameObject(self, objType, location, text):
"""Renames the first instance of object with objType that exists at location"""
obj = self.getSmallestAt(location, objType)
if obj != False:
obj.setText(text)
return True
else:
return False
def resizeObject(self, objType, location, size):
"""Resizes the first instance of object with objType that exists at location to the size provided."""
obj = self.getSmallestAt(location, objType)
if obj != False:
obj = obj.getAttributes()
# Circles are always 1x1
if obj["rect"] is None:
return False
self.removeObject(objType, location)
# Make sure size is positive
f = obj["footprint"]
f[2] = abs(size[0])
f[3] = abs(size[1])
# If too big, make it as big as possible
if f[0] + f[2] >= self.dims[0]:
f[2] = self.dims[0] - f[0]
if f[1] + f[3] >= self.dims[1]:
f[3] = self.dims[1] - f[1]
obj["rect"] = [f[0] * self.spaceDims[0],
f[1] * self.spaceDims[1],
f[2] * self.spaceDims[0],
f[3] * self.spaceDims[1]]
roomObj = roomObject(color=obj["color"],
rect=obj["rect"],
circle=obj["circle"],
outline=obj["outline"],
text=obj["text"],
textColor=obj["textColor"],
objType=obj["objType"],
footprint=obj["footprint"],
textSize=self.textSize)
self.addObject(roomObj)
return True
else:
return False
def saveFile(self, path = None):
"""Saves object list as JSON to given path"""
if path is None:
path = self.file_name
if path == "__RENAME__":
# Have to make resizable so file dialog appears
if WINDOW_CONST == pygame.FULLSCREEN:
pygame.display.set_mode(SCREEN_DIMS, pygame.RESIZABLE)
path = filedialog.asksaveasfilename(title="Choose a file location and name.", filetypes=[("JSON", ".json")], defaultextension=".json")
title = Path(path).stem
if WINDOW_CONST == pygame.FULLSCREEN:
pygame.display.set_mode(SCREEN_DIMS, pygame.FULLSCREEN)
self.file_name = path
self.title = title
save = self.getSaveData()
try:
with open(path, "w") as f:
json.dump(save, f)
return True
except Exception:
return False
def saveObjects(self):
"""Saves objects to a list"""
objects = list()
for obj in self.objects:
objects.append(obj.getAttributes())
return objects
def setData(self, data):
"""Set project data"""
self.title = data["meta"]["title"]
self.dims = data["meta"]["dimensions"]
self.spaceDims = [self.totalDims[0] // self.dims[0], self.totalDims[1] // self.dims[1]]
self.createGridSpaces(self.dims[0], self.dims[1])
for obj in data["objects"]:
# Footprint matters here, to adjust for different-sized screens.
if obj["rect"] is not None:
f = obj["footprint"]
obj["rect"] = [f[0] * self.spaceDims[0],
f[1] * self.spaceDims[1],
f[2] * self.spaceDims[0],
f[3] * self.spaceDims[1]]
if obj["circle"] is not None:
f = obj["footprint"]
obj["circle"] = self.getCoords(f[0:2], True) + [self.spaceDims[0] // 2]
roomObj = roomObject(color=obj["color"],
rect=obj["rect"],
circle=obj["circle"],
outline=obj["outline"],
text=obj["text"],
textColor=obj["textColor"],
objType=obj["objType"],
footprint=obj["footprint"],
textSize=self.textSize)
self.addObject(roomObj)
def setFileName(self, name):
"""Set file output path"""
self.file_name = name
def setWaitFunction(self, name, params):
"""Set up a waiting function."""
if name is None:
self.waitFunction = []
else:
self.waitFunction = [name, params]
def toggleHighlight(self, spaceCoords, color = (210, 210, 210, 1)):
"""Toggles a space to highlight (or not)"""
self.gridSpaces[spaceCoords[0]][spaceCoords[1]].toggleHighlight(color)
def unhighlight(self, spaceCoords):
"""Unhighlights a space"""
self.gridSpaces[spaceCoords[0]][spaceCoords[1]].unhighlight()
class messageCenter():
def __init__(self, x, y, text = "Waiting for voice command.", defaultColor = (0, 0, 0), fontSize = 20):
self.x = x
self.y = y
self.defaultColor = defaultColor
self.font = pygame.font.Font("fonts/Roboto-Regular.ttf", 20)
self.text = text
# Key to objects
self.objects = list()
self.spacing = 30
y_space = 50
for obj in object_types.obj_types:
ot = obj
obj = object_types.obj_types[obj]
if obj["shape"] == "circle":
# Shift x and y for radius
o = roomObject(obj["color"], circle=(self.x + 10, self.y + y_space + 10, 10), objType=ot, outline=obj["outline"])
l = [self.font.render(obj["description"], 1, self.defaultColor), self.x + 70, self.y + y_space]
else:
o = roomObject(obj["color"], (self.x, self.y + y_space, 60, 20), objType=ot, outline=obj["outline"])
l = [self.font.render(obj["description"], 1, self.defaultColor),
self.x + 70, self.y + y_space]
self.objects.append(o)
self.objects.append(l)
y_space += self.spacing
self.commands = [pygame.image.load("img/commands.png"), self.x, self.y + y_space]
def draw(self, surface, color = None):
# The key to objects
for obj in self.objects:
if type(obj) == list:
surface.blit(obj[0], obj[1:])
else:
obj.draw(surface)
surface.blit(self.commands[0], self.commands[1:])
# The message text
if self.text is not None:
if color is None:
color = self.defaultColor
text_surface = self.font.render(self.text, 1, color)
surface.blit(text_surface, (self.x, self.y))
def setText(self, text):
self.text = text
|
import asyncio
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from aiohttp import web
from ....config.injection_context import InjectionContext
from ....utils.stats import Collector
from ...outbound.message import OutboundMessage
from ..http import HttpTransport
class TestHttpTransport(AioHTTPTestCase):
async def setUpAsync(self):
self.context = InjectionContext()
self.message_results = []
async def receive_message(self, request):
payload = await request.json()
self.message_results.append(payload)
raise web.HTTPOk()
async def get_application(self):
"""
Override the get_app method to return your application.
"""
app = web.Application()
app.add_routes([web.post("/", self.receive_message)])
return app
@unittest_run_loop
async def test_handle_message(self):
server_addr = f"http://localhost:{self.server.port}"
async def send_message(transport, payload, endpoint):
async with transport:
await transport.handle_message(self.context, payload, endpoint)
transport = HttpTransport()
await asyncio.wait_for(send_message(transport, "{}", endpoint=server_addr), 5.0)
assert self.message_results == [{}]
@unittest_run_loop
async def test_stats(self):
server_addr = f"http://localhost:{self.server.port}"
async def send_message(transport, payload, endpoint):
async with transport:
await transport.handle_message(self.context, payload, endpoint)
transport = HttpTransport()
transport.collector = Collector()
await asyncio.wait_for(
send_message(transport, b"{}", endpoint=server_addr), 5.0
)
results = transport.collector.extract()
assert results["count"] == {
"outbound-http:dns_resolve": 1,
"outbound-http:connect": 1,
"outbound-http:POST": 1,
}
|
from analysis import makefigure
#from analysis import analysis
#from analysis import classdef
makefigure.makeSnap(datfile="TEST/snap000005.dat", pngfile="ae.png", bHeader=True)
|
import unittest
class TestJobExecutionOrder(unittest.TestCase):
# can actually execute in a number of different ways
# just need certain rules to hold with respect to the order
# A before F, G
# B, C before F
# F before H, I
# D before G before I
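# e.g. one execution order that satisfies every pair checked below:
# JobA, JobB, JobC, JobD, JobE, JobF, JobG, JobH, JobI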
def order_checks(self, exec_order):
def chk(job_one, job_two):
self.assertTrue(
exec_order.index(job_one) < exec_order.index(job_two),
msg='Out of order: {} supposed to be before {}'.format(job_one, job_two)
)
pairs_to_check = [
('JobA', 'JobF'), ('JobB', 'JobF'), ('JobC', 'JobF'), ('JobF', 'JobH'),
('JobF', 'JobI'), ('JobD', 'JobG'), ('JobG', 'JobI'), ('JobA', 'JobG')
]
for job_one, job_two in pairs_to_check:
chk(job_one, job_two)
def setUp(self):
from treetl import Job
self.actual_execution_order = [ ]
def notify(job):
job_name = job.__class__.__name__
self.actual_execution_order.append(job_name)
class NotifyJob(Job):
def transform(self, **kwargs):
notify(self)
return self
class JobA(NotifyJob):
pass
class JobB(NotifyJob):
pass
class JobC(NotifyJob):
pass
class JobD(NotifyJob):
pass
class JobE(NotifyJob):
pass
@Job.dependency(b_data=JobB, c_data=JobC)
class JobF(NotifyJob):
pass
@Job.dependency(a_data=JobA, d_data=JobD)
class JobG(NotifyJob):
pass
@Job.dependency(a_data=JobA, f_data=JobF)
class JobH(NotifyJob):
pass
@Job.dependency(f_data=JobF, g_data=JobG)
class JobI(NotifyJob):
pass
@Job.dependency(f_data=JobF)
class FaultyJob(NotifyJob):
def transform(self, **kwargs):
super(FaultyJob, self).transform()
raise ValueError()
@Job.dependency(faulty_parent=FaultyJob)
class VictimJob(NotifyJob):
pass
@Job.dependency(faulty_parent=FaultyJob)
class OtherVictimJob(NotifyJob):
pass
# they don't need to be in order
self.jobs = [ JobA(), JobD(), JobC(), JobB(), JobE(), JobG(), JobF(), JobI(), JobH() ]
self.faulty_jobs = [ FaultyJob(), VictimJob(), OtherVictimJob() ]
def test_job_order(self):
from treetl import Job, JobRunner, JOB_STATUS
job_tree = JobRunner(self.jobs).run()
self.order_checks(self.actual_execution_order)
self.assertTrue(len(self.actual_execution_order) == len(self.jobs), msg='Some job transformed twice.')
# add jobs that will fail and dependents that won't run as a result
# clear execution order to start over again
self.actual_execution_order = []
job_tree.reset_jobs()
job_tree.add_jobs(self.faulty_jobs)
for failure_child in [ 'VictimJob', 'OtherVictimJob' ]:
self.assertNotIn(
member=failure_child,
container=self.actual_execution_order,
msg='Child of faulty, failed job was executed.'
)
self.assertTrue(job_tree.run().status == JOB_STATUS.FAILED, msg='Job failure not recorded in status')
self.assertItemsEqual(
expected_seq=self.faulty_jobs,
actual_seq=job_tree.failed_jobs(),
msg='Not all faulty jobs were labeled as failed.'
)
self.assertItemsEqual(
expected_seq=[ self.faulty_jobs[0] ],
actual_seq=job_tree.failed_job_roots(),
msg='Root failure not correctly identified.'
)
failed_root_paths_dict = job_tree.failed_job_root_paths()
self.assertTrue(len(failed_root_paths_dict) == 1, msg='Too many failure roots.')
self.assertItemsEqual(
expected_seq=[ self.faulty_jobs[0] ],
actual_seq=failed_root_paths_dict.keys(),
msg='Incorrect failed root in { failed_root: paths_to_failed_root }'
)
self.assertItemsEqual(
expected_seq=[
[ 'JobC', 'JobF', 'FaultyJob' ],
[ 'JobB', 'JobF', 'FaultyJob' ]
],
actual_seq=[
[ job.__class__.__name__ for job in path ]
for path in failed_root_paths_dict[self.faulty_jobs[0]]
],
msg='Incorrect paths to root failure.'
)
|
from .handler import Handler
|
from z80 import util, io, gui, registers, instructions
import copy
from time import sleep, time
import sys
from PySide.QtCore import *
from PySide.QtGui import *
import threading
#logging.basicConfig(level=logging.INFO)
class Z80SBC(io.Interruptable):
def __init__(self):
self.registers = registers.Registers()
self.instructions = instructions.InstructionSet(self.registers)
self._memory = bytearray(64*1024)
self._read_rom("../roms/ROM.HEX")
self._iomap = io.IOMap()
self._console = io.Console(self)
self._reg_gui = gui.RegistersGUI(self.registers)
self._mem_view = gui.MemoryView(self._memory, self.registers)
self._iomap.addDevice(self._console)
self._console.show()
self._reg_gui.show()
self._mem_view.show()
self._interrupted = False
def interrupt(self):
self._interrupted = True
def _read_rom(self, romfile):
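# Intel HEX records look like ":llaaaatt<data>cc" -- ll = byte count,
# aaaa = load address, tt = record type (00 data, 01 end-of-file),
# cc = checksum (not verified here).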
with open(romfile, "r") as f:
while True:
line = f.readline()
if line[0] != ":":
raise Exception("Bad start code in hex file.")
count = int(line[1:3], 16)#.decode("hex")
address = int(line[3:7], 16) #.decode("hex")
if address + count > len(self._memory):
raise Exception("Trying to create M2764 ROM with too large a ROM file")
rtype = line[7:9]
pos = 9
if rtype == "01":
break
for b in range(count):
byte = int(line[pos+(2*b):pos+(2*b)+2], 16) #. decode("hex")
self._memory[address+b] = byte
def step_instruction(self):
ins, args = False, []
pc = self.registers.PC
if self._interrupted and self.registers.IFF:
self.registers.IFF = False
self._interrupted = False
if self.registers.IM == 1:
print "!!! Interrupt !!!"
ins, args = self.instructions << 0xCD
ins, args = self.instructions << 0x38
ins, args = self.instructions << 0x00
self.registers.IFF = False
else:
while not ins:
ins, args = self.instructions << self._memory[self.registers.PC]
self.registers.PC = util.inc16(self.registers.PC)
#print( "{0:X} : {1} ".format(pc, ins.assembler(args)))
rd = ins.get_read_list(args)
data = [0] * len(rd)
for n, i in enumerate(rd):
if i < 0x10000:
data[n] = self._memory[i]
else:
address = i & 0xFF
data[n] = self._iomap.address[address].read(address)
wrt = ins.execute(data, args)
for i in wrt:
if i[0] > 0x10000:
address = i[0] & 0xFF
#iomap.address[address].write.emit(address, i[1])
self._iomap.address[address].write(address, i[1])
#print (chr(i[1]))
else:
self._memory[i[0]] = i[1]
return ins, args
if __name__ == '__main__':
''' Main Program '''
qt_app = QApplication(sys.argv)
mach = Z80SBC()
def worker():
t = time()
while True:
# t = time()
ins, args = mach.step_instruction()
print ins.assembler(args)
sleep(0.00000001)
# print (time() - t) / ins.tstates
# mach._mem_view.update()
# mach._reg_gui.update()
thread = threading.Thread(target=worker)
thread.setDaemon(True)
thread.start()
qt_app.exec_()
|
# simple statistics module
import resource
def _systemtime ():
ru_self = resource.getrusage (resource.RUSAGE_SELF)
ru_ch = resource.getrusage (resource.RUSAGE_CHILDREN)
return ru_self.ru_utime + ru_ch.ru_utime
class Stopwatch:
""" A stop watch """
def __init__ (self):
self._started = 0
self._finished = -1
self._elapsed = 0
self.start ()
@property
def elapsed (self):
""" Returns time (in seconds) since the stopwatch has been started. """
if self._finished < self._started:
return self._elapsed + (_systemtime () - self._started)
return self._elapsed + (self._finished - self._started)
def start (self):
""" Starts or resumes the stopwatch """
# collect elapsed time so far
if self._finished >= self._started:
self._elapsed += (self._finished - self._started)
self._started = _systemtime ()
self._finished = -1
def stop (self):
""" Stops the stopwatch """
if self._finished < self._started:
self._finished = _systemtime ()
def reset (self):
""" Resets the stopwatch by erasing all elapsed time """
self._elapsed = 0
self._finished = -1
self.start ()
def __str__ (self):
""" Reports time in seconds up to two decimal places """
return "{0:.2f}".format (self.elapsed)
def lap (name):
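""" Context manager that times a block and prints 'DONE <name> in <elapsed>' when it exits """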
class ContextManager (object):
def __init__ (self, name):
self._name = name
self._sw = Stopwatch ()
def __enter__ (self):
self._sw.reset ()
return None
def __exit__ (self, exc_type, exc_value, traceback):
print 'DONE', name, 'in', str(self._sw)
return False
return ContextManager (name)
_statistics = dict()
def get (key):
""" Gets a value from statistics table """
return _statistics.get (key)
def put (key, v):
""" Puts a value in statistics table """
_statistics[key] = v
def start (key):
""" Starts (or resumes) a named stopwatch """
sw = get (key)
if sw is None:
sw = Stopwatch ()
put (key, sw)
return sw
else:
sw.start ()
def stop (key):
""" Stops a named stopwatch """
sw = get (key)
if sw is not None: sw.stop ()
def count (key):
""" Increments a named counter """
c = get (key)
if c is None: put (key, 1)
else: put (key, c + 1)
def brunch_print ():
""" Prints the result in brunch format """
if not _statistics:
return
print '----------------------------------------------------------------------'
for k in sorted (_statistics.keys ()):
print 'BRUNCH_STAT {name} {value}'.format (name=k, value=_statistics [k])
print '----------------------------------------------------------------------'
def timer (key):
""" ContextManager to help measuring time.
with timer('myname') as t:
do_code_that_is_timed
"""
class TimerContextManager (object):
def __init__ (self, key):
self._key = key
def __enter__ (self):
start (key)
return None
def __exit__ (self, exc_type, exc_value, traceback):
stop (key)
return False
return TimerContextManager (key)
def block(mark):
class BlockMarkerContextManager (object):
def __init__ (self, mark):
self._mark = mark
def __enter__ (self):
print 'BEGIN:', mark
return None
def __exit__ (self, exc_type, exc_value, traceback):
print 'END:', mark
return False
return BlockMarkerContextManager (mark)
def count_stats (f):
def counted_func (*args, **kwds):
count (f.__module__ + "." + f.__name__ + ".cnt")
return f(*args, **kwds)
counted_func.__name__ = f.__name__
counted_func.__doc__ = f.__doc__
return counted_func
def time_stats (f):
""" Function decorator to time a function
@time_stats
def foo (): pass
"""
def timed_func (*args, **kwds):
count (f.__module__ + "." + f.__name__ + ".cnt")
with timer (f.__module__ + '.' + f.__name__):
return f(*args, **kwds)
timed_func.__name__ = f.__name__
timed_func.__doc__ = f.__doc__
return timed_func
@time_stats
def _test_function ():
c = 0
while c < 100000000: c += 1
if __name__ == '__main__':
import time
c= 0
count ('tick')
with timer ('timer') as t:
while c < 10000000: c += 1
brunch_print ()
c = 0
count ('tick')
with timer ('timer') as t:
while c < 10000000: c += 1
brunch_print ()
_test_function ()
brunch_print ()
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
from bs4 import BeautifulSoup
class PaginationController(Controller):
def get(self):
self.test_simple_1()
self.test_simple_2()
self.test_all()
self.test_first_page()
self.test_last_page()
self.test_first_block()
self.test_last_block()
self.test_render()
def test_render(self):
params = {
'total_count': 100,
'page': 3,
'rpp': 10,
'kwargs': {
}
}
self.render('tests/view/module/pagination.html', params)
def test_simple_1(self):
params = {
'total_count': 100,
'page': 3,
'rpp': 10,
'kwargs': {
}
}
pagination = self.render_string('tests/view/module/pagination.html', params)
pagination = BeautifulSoup(pagination, 'lxml')
assert(len(pagination.findAll('div')) == 1)
assert(len(pagination.find('div').findAll('strong')) == 1)
assert(len(pagination.find('div').findAll('a')) == 9)
def test_simple_2(self):
params = {
'total_count': 1000,
'page': 25,
'rpp': 10,
'kwargs': {
}
}
pagination = self.render_string('tests/view/module/pagination.html', params)
pagination = BeautifulSoup(pagination, 'lxml')
assert(len(pagination.findAll('div')) == 1)
assert(len(pagination.find('div').findAll('strong')) == 1)
assert(len(pagination.find('div').findAll('a')) == 13)
def test_all(self):
region_tag = 'div'
region_class = 'region-class'
first = 'First'
first_class = 'first-class'
last = 'Last'
last_class = 'last-class'
prev_block = 'Prev-Block'
prev_block_class = 'prev-block-class'
next_block = 'Next-Block'
next_block_class = 'next-block-class'
prev = 'Prev'
prev_class = 'prev-class'
next = 'Next'
next_class = 'next-class'
current_tag = 'strong'
current_class = 'current-class'
link_tag = 'a'
link_class = 'link-class'
params = {
'total_count': 10000,
'page': 33,
'rpp': 10,
'kwargs': {
'region_tag': region_tag,
'region_class': region_class,
'first': first,
'first_class': first_class,
'last': last,
'last_class': last_class,
'prev_block': prev_block,
'prev_block_class': prev_block_class,
'next_block': next_block,
'next_block_class': next_block_class,
'prev': prev,
'prev_class': prev_class,
'next': next,
'next_class': next_class,
'current_tag': current_tag,
'current_class': current_class,
'link_tag': link_tag,
'link_class': link_class,
'space': '_'
}
}
pagination = self.render_string('tests/view/module/pagination.html', params)
pagination = BeautifulSoup(pagination, 'lxml')
assert(len(pagination.findAll(region_tag)) == 1)
assert(pagination.find(region_tag).attrs['class'][0] == region_class)
assert(len(pagination.find(region_tag).findAll(link_tag)) == 15)
links = pagination.find(region_tag).findAll(link_tag)
assert(links[0].attrs['class'][0] == first_class)
assert(links[0].text == first)
assert(links[1].attrs['class'][0] == prev_block_class)
assert(links[1].text == prev_block)
assert(links[2].attrs['class'][0] == prev_class)
assert(links[2].text == prev)
assert(links[-3].attrs['class'][0] == next_class)
assert(links[-3].text == next)
assert(links[-2].attrs['class'][0] == next_block_class)
assert(links[-2].text == next_block)
assert(links[-1].attrs['class'][0] == last_class)
assert(links[-1].text == last)
links = links[3:-3]
for e in links:
assert(e.name == link_tag)
assert(e.attrs['class'][0] == link_class)
assert(self.helper.numeric.extract_numbers(e.text) == e.text)
def test_first_page(self):
region_tag = 'div'
region_class = 'region-class'
first = 'First'
first_class = 'first-class'
last = 'Last'
last_class = 'last-class'
prev_block = 'Prev-Block'
prev_block_class = 'prev-block-class'
next_block = 'Next-Block'
next_block_class = 'next-block-class'
prev = 'Prev'
prev_class = 'prev-class'
next = 'Next'
next_class = 'next-class'
current_tag = 'strong'
current_class = 'current-class'
link_tag = 'a'
link_class = 'link-class'
params = {
'total_count': 10000,
'page': 1,
'rpp': 10,
'kwargs': {
'region_tag': region_tag,
'region_class': region_class,
'first': first,
'first_class': first_class,
'last': last,
'last_class': last_class,
'prev_block': prev_block,
'prev_block_class': prev_block_class,
'next_block': next_block,
'next_block_class': next_block_class,
'prev': prev,
'prev_class': prev_class,
'next': next,
'next_class': next_class,
'current_tag': current_tag,
'current_class': current_class,
'link_tag': link_tag,
'link_class': link_class,
'space': '_'
}
}
pagination = self.render_string('tests/view/module/pagination.html', params)
pagination = BeautifulSoup(pagination, 'lxml')
assert(len(pagination.findAll(region_tag)) == 1)
assert(pagination.find(region_tag).attrs['class'][0] == region_class)
assert(len(pagination.find(region_tag).findAll(link_tag)) == (15 - 3))
links = pagination.find(region_tag).findAll(link_tag)
assert(links[-3].attrs['class'][0] == next_class)
assert(links[-3].text == next)
assert(links[-2].attrs['class'][0] == next_block_class)
assert(links[-2].text == next_block)
assert(links[-1].attrs['class'][0] == last_class)
assert(links[-1].text == last)
links = links[0:-3]
for e in links:
assert(e.name == link_tag)
assert(e.attrs['class'][0] == link_class)
assert(self.helper.numeric.extract_numbers(e.text) == e.text)
def test_last_page(self):
region_tag = 'div'
region_class = 'region-class'
first = 'First'
first_class = 'first-class'
last = 'Last'
last_class = 'last-class'
prev_block = 'Prev-Block'
prev_block_class = 'prev-block-class'
next_block = 'Next-Block'
next_block_class = 'next-block-class'
prev = 'Prev'
prev_class = 'prev-class'
next = 'Next'
next_class = 'next-class'
current_tag = 'strong'
current_class = 'current-class'
link_tag = 'a'
link_class = 'link-class'
params = {
'total_count': 10000,
'page': 1000,
'rpp': 10,
'kwargs': {
'region_tag': region_tag,
'region_class': region_class,
'first': first,
'first_class': first_class,
'last': last,
'last_class': last_class,
'prev_block': prev_block,
'prev_block_class': prev_block_class,
'next_block': next_block,
'next_block_class': next_block_class,
'prev': prev,
'prev_class': prev_class,
'next': next,
'next_class': next_class,
'current_tag': current_tag,
'current_class': current_class,
'link_tag': link_tag,
'link_class': link_class,
'space': '_'
}
}
pagination = self.render_string('tests/view/module/pagination.html', params)
pagination = BeautifulSoup(pagination, 'lxml')
assert(len(pagination.findAll(region_tag)) == 1)
assert(pagination.find(region_tag).attrs['class'][0] == region_class)
assert(len(pagination.find(region_tag).findAll(link_tag)) == (15 - 3))
links = pagination.find(region_tag).findAll(link_tag)
assert(links[0].attrs['class'][0] == first_class)
assert(links[0].text == first)
assert(links[1].attrs['class'][0] == prev_block_class)
assert(links[1].text == prev_block)
assert(links[2].attrs['class'][0] == prev_class)
assert(links[2].text == prev)
links = links[3:]
for e in links:
assert(e.name == link_tag)
assert(e.attrs['class'][0] == link_class)
assert(self.helper.numeric.extract_numbers(e.text) == e.text)
def test_first_block(self):
region_tag = 'div'
region_class = 'region-class'
first = 'First'
first_class = 'first-class'
last = 'Last'
last_class = 'last-class'
prev_block = 'Prev-Block'
prev_block_class = 'prev-block-class'
next_block = 'Next-Block'
next_block_class = 'next-block-class'
prev = 'Prev'
prev_class = 'prev-class'
next = 'Next'
next_class = 'next-class'
current_tag = 'strong'
current_class = 'current-class'
link_tag = 'a'
link_class = 'link-class'
params = {
'total_count': 10000,
'page': 2,
'rpp': 10,
'kwargs': {
'region_tag': region_tag,
'region_class': region_class,
'first': first,
'first_class': first_class,
'last': last,
'last_class': last_class,
'prev_block': prev_block,
'prev_block_class': prev_block_class,
'next_block': next_block,
'next_block_class': next_block_class,
'prev': prev,
'prev_class': prev_class,
'next': next,
'next_class': next_class,
'current_tag': current_tag,
'current_class': current_class,
'link_tag': link_tag,
'link_class': link_class,
'space': '_'
}
}
pagination = self.render_string('tests/view/module/pagination.html', params)
pagination = BeautifulSoup(pagination, 'lxml')
assert(len(pagination.findAll(region_tag)) == 1)
assert(pagination.find(region_tag).attrs['class'][0] == region_class)
assert(len(pagination.find(region_tag).findAll(link_tag)) == (15 - 2))
links = pagination.find(region_tag).findAll(link_tag)
assert(links[0].attrs['class'][0] == prev_class)
assert(links[0].text == prev)
assert(links[-3].attrs['class'][0] == next_class)
assert(links[-3].text == next)
assert(links[-2].attrs['class'][0] == next_block_class)
assert(links[-2].text == next_block)
assert(links[-1].attrs['class'][0] == last_class)
assert(links[-1].text == last)
links = links[1:-3]
for e in links:
assert(e.name == link_tag)
assert(e.attrs['class'][0] == link_class)
assert(self.helper.numeric.extract_numbers(e.text) == e.text)
def test_last_block(self):
region_tag = 'div'
region_class = 'region-class'
first = 'First'
first_class = 'first-class'
last = 'Last'
last_class = 'last-class'
prev_block = 'Prev-Block'
prev_block_class = 'prev-block-class'
next_block = 'Next-Block'
next_block_class = 'next-block-class'
prev = 'Prev'
prev_class = 'prev-class'
next = 'Next'
next_class = 'next-class'
current_tag = 'strong'
current_class = 'current-class'
link_tag = 'a'
link_class = 'link-class'
params = {
'total_count': 10000,
'page': 999,
'rpp': 10,
'kwargs': {
'region_tag': region_tag,
'region_class': region_class,
'first': first,
'first_class': first_class,
'last': last,
'last_class': last_class,
'prev_block': prev_block,
'prev_block_class': prev_block_class,
'next_block': next_block,
'next_block_class': next_block_class,
'prev': prev,
'prev_class': prev_class,
'next': next,
'next_class': next_class,
'current_tag': current_tag,
'current_class': current_class,
'link_tag': link_tag,
'link_class': link_class,
'space': '_'
}
}
pagination = self.render_string('tests/view/module/pagination.html', params)
pagination = BeautifulSoup(pagination, 'lxml')
assert(len(pagination.findAll(region_tag)) == 1)
assert(pagination.find(region_tag).attrs['class'][0] == region_class)
assert(len(pagination.find(region_tag).findAll(link_tag)) == (15 - 2))
links = pagination.find(region_tag).findAll(link_tag)
assert(links[0].attrs['class'][0] == first_class)
assert(links[0].text == first)
assert(links[1].attrs['class'][0] == prev_block_class)
assert(links[1].text == prev_block)
assert(links[2].attrs['class'][0] == prev_class)
assert(links[2].text == prev)
assert(links[-1].attrs['class'][0] == next_class)
assert(links[-1].text == next)
links = links[3:-1]
for e in links:
assert(e.name == link_tag)
assert(e.attrs['class'][0] == link_class)
assert(self.helper.numeric.extract_numbers(e.text) == e.text)
|
import util
def solve(input_path):
with open(input_path) as f:
line_points = tuple(util.completion_score(s.rstrip()) for s in f)
positive_points = tuple(filter(lambda s: s > 0, line_points))
return sorted(positive_points)[int(len(positive_points) / 2)]
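# The solver keeps only the lines whose completion score is positive and returns the
# middle element of the sorted scores, e.g. for hypothetical scores (3, 0, 57, 5)
# the positives sort to [3, 5, 57] and the answer is 5.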
if __name__ == '__main__':
print(solve('input.txt'))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1.extras import kerberos
from keystoneauth1 import loading
class Kerberos(loading.BaseV3Loader):
@property
def plugin_class(self):
return kerberos.Kerberos
@property
def available(self):
return kerberos.requests_kerberos is not None
class MappedKerberos(loading.BaseFederationLoader):
@property
def plugin_class(self):
return kerberos.MappedKerberos
@property
def available(self):
return kerberos.requests_kerberos is not None
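# Hedged usage sketch (not part of the original module): both loaders expose
# keystoneauth's standard loader interface, so callers can probe availability
# before constructing the plugin.
def _example_kerberos_available():
    loader = Kerberos()
    return loader.available and loader.plugin_class is kerberos.Kerberos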
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from contextlib import contextmanager
from pants.base.revision import Revision
from pants.scm.git import Git
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.process_handler import subprocess
MIN_REQUIRED_GIT_VERSION = Revision.semver('1.7.10')
def git_version():
"""Get a Version() based on installed command-line git's version"""
process = subprocess.Popen(['git', '--version'], stdout=subprocess.PIPE)
(stdout, stderr) = process.communicate()
assert process.returncode == 0, "Failed to determine git version."
# stdout is like 'git version 1.9.1.598.g9119e8b\n' We want '1.9.1.598'
matches = re.search(r'\s(\d+(?:\.\d+)*)[\s\.]', stdout.decode('utf-8'))
return Revision.lenient(matches.group(1))
def get_repo_root():
"""Return the absolute path to the root directory of the Pants git repo."""
return subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip()
@contextmanager
def initialize_repo(worktree, gitdir=None):
"""Initialize a git repository for the given `worktree`.
NB: The given `worktree` must contain at least one file which will be committed to form an initial
commit.
:param string worktree: The path to the git work tree.
:param string gitdir: An optional path to the `.git` dir to use.
:returns: A `Git` repository object that can be used to interact with the repo.
:rtype: :class:`pants.scm.git.Git`
"""
@contextmanager
def use_gitdir():
if gitdir:
yield gitdir
else:
with temporary_dir() as d:
yield d
with use_gitdir() as git_dir, environment_as(GIT_DIR=git_dir, GIT_WORK_TREE=worktree):
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'config', 'user.email', 'you@example.com'])
subprocess.check_call(['git', 'config', 'user.name', 'Your Name'])
subprocess.check_call(['git', 'add', '.'])
subprocess.check_call(['git', 'commit', '-am', 'Add project files.'])
yield Git(gitdir=git_dir, worktree=worktree)
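# Hedged usage sketch (not part of the original module): drives initialize_repo
# with a throwaway worktree. The 'README' file name and its content are
# illustrative; commit_id is assumed to be exposed by the returned Git object.
def _example_initialize_repo():
    import os
    with temporary_dir() as worktree:
        with open(os.path.join(worktree, 'README'), 'w') as fp:
            fp.write('example content')
        with initialize_repo(worktree) as git:
            return git.commit_id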
|
"Test for Range Query"
from progressivis.table.constant import Constant
from progressivis.table.table import Table
from progressivis import Print
from progressivis.stats import RandomTable, Min, Max
from progressivis.core.bitmap import bitmap
from progressivis.table.range_query import RangeQuery
from progressivis.utils.psdict import PsDict
from progressivis.core import aio
from . import ProgressiveTest, main
class TestRangeQuery(ProgressiveTest):
"Test Suite for RangeQuery Module"
def tearDown(self):
TestRangeQuery.cleanup()
def test_range_query(self):
"Run tests of the RangeQuery module"
s = self.scheduler()
with s:
random = RandomTable(2, rows=1000, scheduler=s)
t_min = PsDict({'_1': 0.3})
min_value = Constant(table=t_min, scheduler=s)
t_max = PsDict({'_1': 0.8})
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(random, 'result',
min_value=min_value,
max_value=max_value)
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = range_qry.output.result
aio.run(s.start())
idx = range_qry.input_module\
.output['result']\
.data().eval('(_1>0.3)&(_1<0.8)', result_object='index')
self.assertEqual(range_qry.result.index, bitmap(idx))
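# The check above recomputes the expected row selection eagerly with eval() and
# compares it, as a bitmap, against the index RangeQuery produced incrementally.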
def test_hist_index_min_max(self):
"Test min_out and max_out on HistogramIndex"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.3})
min_value = Constant(table=t_min, scheduler=s)
t_max = PsDict({'_1': 0.8})
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(random, 'result',
min_value=min_value,
max_value=max_value)
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = range_qry.output.result
hist_index = range_qry.hist_index
min_ = Min(name='min_'+str(hash(hist_index)), scheduler=s)
min_.input[0] = hist_index.output.min_out
prt2 = Print(proc=self.terse, scheduler=s)
prt2.input[0] = min_.output.result
max_ = Max(name='max_'+str(hash(hist_index)), scheduler=s)
max_.input[0] = hist_index.output.max_out
pr3 = Print(proc=self.terse, scheduler=s)
pr3.input[0] = max_.output.result
aio.run(s.start())
res1 = random.result.min()['_1']
res2 = min_.result['_1']
self.assertAlmostEqual(res1, res2)
res1 = random.result.max()['_1']
res2 = max_.result['_1']
self.assertAlmostEqual(res1, res2)
def _query_min_max_impl(self, random, t_min, t_max, s):
min_value = Constant(table=t_min, scheduler=s)
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(random, 'result',
min_value=min_value,
max_value=max_value)
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = range_qry.output.result
prt2 = Print(proc=self.terse, scheduler=s)
prt2.input[0] = range_qry.output.min
pr3 = Print(proc=self.terse, scheduler=s)
pr3.input[0] = range_qry.output.max
return range_qry
def test_range_query_min_max(self):
"Test min and max on RangeQuery output"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.3})
t_max = PsDict({'_1': 0.8})
range_qry = self._query_min_max_impl(random, t_min, t_max, s)
aio.run(s.start())
min_data = range_qry.output.min.data()
max_data = range_qry.output.max.data()
self.assertAlmostEqual(min_data['_1'], 0.3)
self.assertAlmostEqual(max_data['_1'], 0.8)
def test_range_query_min_max2(self):
"Test min and max on RangeQuery output"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.0})
t_max = PsDict({'_1': float('nan')})
range_qry = self._query_min_max_impl(random, t_min, t_max, s)
aio.run(s.start())
min_data = range_qry.output.min.data()
max_data = range_qry.output.max.data()
min_rand = random.result.min()['_1']
self.assertAlmostEqual(min_data['_1'], min_rand, delta=0.0001)
self.assertAlmostEqual(max_data['_1'], 1.0, delta=0.0001)
def test_range_query_min_max3(self):
"Test min and max on RangeQuery output"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.3})
t_max = PsDict({'_1': 15000.})
range_qry = self._query_min_max_impl(random, t_min, t_max, s)
aio.run(s.start())
min_data = range_qry.output.min.data()
max_data = range_qry.output.max.data()
max_rand = random.result.max()['_1']
self.assertAlmostEqual(min_data['_1'], 0.3)
self.assertAlmostEqual(max_data['_1'], max_rand)
if __name__ == '__main__':
main()
|
import fw
class Magnifier(fw.Gen):
def gen(self, x, y):
wid = 2
xx, yy = x // wid, y // wid
v = (xx + yy) % 3
# print (xx, yy, v)
if v == 0:
return 0
elif v == 1:
return 128
else:
return self.maxval
if __name__ == "__main__":
w, h = 1024, 1024
fmt = fw.AsciiPixelFmt
fw.main(h,w, fmt, Magnifier)
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
GeodesicDensifier
A QGIS plugin
Adds vertices to geometry along geodesic lines
-------------------
begin : 2018-02-21
git sha : $Format:%H$
copyright : (C) 2018 by Jonah Sullivan
email : jonah.sullivan@ga.gov.au
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
try:
# use system version of geographiclib
from geographiclib.geodesic import Geodesic
print("import worked")
except ImportError:
# use version of geographiclib distributed with plugin
import site
from os.path import abspath, dirname
from inspect import getsourcefile
# this will get the path for this file and add it to the system PATH
# so the geographiclib folder can be found
print(dirname((getsourcefile(lambda:0))))
site.addsitedir(dirname(abspath(getsourcefile(lambda: 0))))
from geographiclib.geodesic import Geodesic
import math
from qgis.core import QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsWkbTypes, QgsFeature, QgsPointXY, QgsGeometry, QgsField, QgsProject
from PyQt5.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QVariant
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .geodesic_densifier_dialog import GeodesicDensifierDialog
import os.path
class GeodesicDensifier:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# Create the dialog (after translation) and keep reference
self.dlg = GeodesicDensifierDialog()
# Declare instance attributes
self.actions = []
self.menu = u'&Geodesic Densifier'
self.toolbar = self.iface.addToolBar(u'GeodesicDensifier')
self.toolbar.setObjectName(u'GeodesicDensifier')
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/GeodesicDensifier3/icon.png'
self.add_action(
icon_path,
text=u'Geodesic Densifier',
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(u'&Geodesic Densifier', action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def run(self):
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
# set default values
self.inLayer = self.dlg.mMapLayerComboBox.currentLayer()
def set_in_layer():
self.inLayer = self.dlg.mMapLayerComboBox.currentLayer()
if self.inLayer:
if self.inLayer.crs():
self.dlg.messageBox.setText("Input Layer Set: " + str(self.inLayer.name()))
else:
self.dlg.messageBox.setText("Error: Input must have projection defined")
# listener to set input layer when combo box changes
self.dlg.mMapLayerComboBox.layerChanged.connect(set_in_layer)
# clear the ellipsoid combobox
self.dlg.EllipsoidcomboBox.clear()
ellipsoid_dict = {'WGS84': [6378137, 298.2572236],
'165': [6378165.000, 298.3],
'ANS': [6378160, 298.25],
'CLARKE 1858': [6378293.645, 294.26],
'GRS80': [6378137, 298.2572221],
'WGS72': [6378135, 298.26],
'International 1924': [6378388, 297]}
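# each entry is [semi-major axis a in metres, inverse flattening 1/f];
# these values are passed to Geodesic(a, 1/f) further below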
# add items to ellipsoid combobox
for k in list(ellipsoid_dict.keys()):
self.dlg.EllipsoidcomboBox.addItem(str(k))
# default ellipsoid is WGS84
self.ellipsoid_a = 6378137.0
self.ellipsoid_f = 298.2572236
self.ellipsoid_name = 'WGS84'
def set_in_ellipsoid():
in_ellipsoid_name = self.dlg.EllipsoidcomboBox.currentText()
for k in list(ellipsoid_dict.keys()):
if k == in_ellipsoid_name:
self.ellipsoid_a = ellipsoid_dict[k][0]
self.ellipsoid_f = ellipsoid_dict[k][1]
self.ellipsoid_name = k
self.dlg.messageBox.setText("Ellipsoid set to " + str(k))
# listener to set input ellipsoid when combo box changes
self.dlg.EllipsoidcomboBox.currentIndexChanged.connect(set_in_ellipsoid)
# default point spacing is 900
self.spacing = 900
def set_in_spacing():
self.spacing = int(self.dlg.spacingSpinBox.value())
self.dlg.messageBox.setText("Point spacing set to " + str(self.spacing) + "m")
# listener to set input point spacing when spin box changes
self.dlg.spacingSpinBox.valueChanged.connect(set_in_spacing)
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
# set the input layer
self.inLayer = self.dlg.mMapLayerComboBox.currentLayer()
# get the field list
fields = self.inLayer.fields()
# handle layers that aren't WGS84 (EPSG:4326)
wgs84crs = QgsCoordinateReferenceSystem("EPSG:4326")
if self.inLayer.crs() != wgs84crs:
transtowgs84 = QgsCoordinateTransform(self.inLayer.crs(), wgs84crs, QgsProject.instance())
transfromwgs84 = QgsCoordinateTransform(wgs84crs, self.inLayer.crs(), QgsProject.instance())
# get input geometry type
if self.inLayer.geometryType() == QgsWkbTypes.PointGeometry:
self.inType = 'Point' # works
if self.inLayer.geometryType() == QgsWkbTypes.LineGeometry:
self.inType = 'LineString' # works
if self.inLayer.geometryType() == QgsWkbTypes.PolygonGeometry:
self.inType = 'Polygon' # works
# setup output layers
if self.inType == 'Point':
self.create_point = True
# create and add to map canvas a point memory layer
layer_name = "Densified Point " + str(self.ellipsoid_name) + " " + str(self.spacing) + "m"
out_point_layer = self.iface.addVectorLayer("Point?crs={}".format(self.inLayer.crs().authid()),
layer_name,
"memory")
# set data provider
pointPr = out_point_layer.dataProvider()
# add attribute fields
pointPr.addAttributes(fields)
pointTypeField = ''
if "pointType" not in [field.name() for field in fields]:
pointTypeField = "pointType"
elif "pntType" not in [field.name() for field in fields]:
pointTypeField = "pntType"
elif "pntTyp" not in [field.name() for field in fields]:
pointTypeField = "pntTyp"
pointPr.addAttributes([QgsField(pointTypeField, QVariant.String)])
out_point_layer.updateFields()
else:
self.create_point = False
if self.inType == 'LineString':
self.create_polyline = True
# create and add to map canvas a polyline memory layer
layer_name = "Densified Line " + str(self.ellipsoid_name) + " " + str(self.spacing) + "m"
out_line_layer = self.iface.addVectorLayer("LineString?crs={}".format(self.inLayer.crs().authid()),
layer_name,
"memory")
# set data provider
linePr = out_line_layer.dataProvider()
# add attribute fields
linePr.addAttributes(fields)
out_line_layer.updateFields()
else:
self.create_polyline = False
if self.inType == 'Polygon':
self.create_polygon = True
# create and add to map canvas a polyline memory layer
layer_name = "Densified Polygon " + str(self.ellipsoid_name) + " " + str(self.spacing) + "m"
out_poly_layer = self.iface.addVectorLayer("Polygon?crs={}".format(self.inLayer.crs().authid()),
layer_name,
"memory")
# set data provider
polyPr = out_poly_layer.dataProvider()
# add attribute fields
polyPr.addAttributes(fields)
out_poly_layer.updateFields()
else:
self.create_polygon = False
# Create a geographiclib Geodesic object
self.geod = Geodesic(self.ellipsoid_a, 1 / self.ellipsoid_f)
def densifyPoint(inLayer, pr):
pointTypeFieldIdx = pr.fieldNameIndex(pointTypeField)
iterator = inLayer.getFeatures()
featureCount = pr.featureCount()
counter = 0
currentFeature = QgsFeature()
badGeom = 0
for feature in iterator:
if feature.geometry().wkbType() == QgsWkbTypes.Point:
try:
if counter == 0:
pointxy = feature.geometry().asPoint()
currentFeature.setGeometry(QgsGeometry.fromPointXY(pointxy))
attr = feature.attributes()
attr.append("Original")
currentFeature.setAttributes(attr)
pr.addFeatures([currentFeature])
else:
startPt = currentFeature.geometry().asPoint()
endPt = feature.geometry().asPoint()
if self.inLayer.crs() != wgs84crs:
startPt = transtowgs84.transform(startPt)
endPt = transtowgs84.transform(endPt)
# create a geographiclib line object
lineObject = self.geod.InverseLine(startPt.y(), startPt.x(), endPt.y(), endPt.x())
# determine how many densified segments there will be
n = int(math.ceil(lineObject.s13 / self.spacing))
# adjust the spacing distance
seglen = lineObject.s13 / n
for i in range(1, n):
if i > 0:
s = seglen * i
g = lineObject.Position(s, Geodesic.LATITUDE | Geodesic.LONGITUDE | Geodesic.LONG_UNROLL)
geom = QgsPointXY(g['lon2'], g['lat2'])
attr = feature.attributes()
attr.append("Densified")
currentFeature.setAttributes(attr)
if self.inLayer.crs() != wgs84crs: # Convert each point back to the output CRS
geom = transfromwgs84.transform(geom)
currentFeature.setGeometry(QgsGeometry.fromPointXY(geom))
pr.addFeatures([currentFeature])
geom = feature.geometry().asPoint()
currentFeature.setGeometry(QgsGeometry.fromPointXY(geom))
attr = feature.attributes()
attr.append("Original")
currentFeature.setAttributes(attr)
pr.addFeatures([currentFeature])
counter += 1
except:
badGeom += 1
counter += 1
else:
badGeom += 1
self.iface.messageBar().pushWarning("multipoint geometries will not be densified")
if badGeom > 0:
self.iface.messageBar().pushWarning("", "{} features failed".format(badGeom))
def densifyLine(inLayer, pr):
badGeom = 0
iterator = inLayer.getFeatures()
# create empty feature to write to
newLine = QgsFeature()
segments = []
for feature in iterator:
try:
if feature.geometry().wkbType() == QgsWkbTypes.LineString:
segments = [feature.geometry().asPolyline()]
elif feature.geometry().wkbType() == QgsWkbTypes.MultiLineString:
segments = feature.geometry().asMultiPolyline()
else:
badGeom += 1
segmentCount = len(segments)
except:
badGeom += 1
if feature.geometry().wkbType() == QgsWkbTypes.LineString:
line = segments[0]
pointCount = len(line)
startPt = QgsPointXY(line[0][0], line[0][1])
if self.inLayer.crs() != wgs84crs:
startPt = transtowgs84.transform(startPt)
pointList = [startPt]
for i in range(1,pointCount):
endPt = QgsPointXY(line[i][0], line[i][1])
if self.inLayer.crs() != wgs84crs:
endPt = transtowgs84.transform(endPt)
# create a geographiclib line object
lineObject = self.geod.InverseLine(startPt.y(), startPt.x(), endPt.y(), endPt.x())
# determine how many densified segments there will be
n = int(math.ceil(lineObject.s13 / self.spacing))
if lineObject.s13 > self.spacing:
seglen = lineObject.s13 / n
for i in range(1, n):
s = seglen * i
g = lineObject.Position(s, Geodesic.LATITUDE | Geodesic.LONGITUDE | Geodesic.LONG_UNROLL)
pointList.append(QgsPointXY(g['lon2'], g['lat2']))
pointList.append(endPt)
startPt = endPt
if self.inLayer.crs() != wgs84crs: # Convert each point back to the output CRS
for x, pt in enumerate(pointList):
pointList[x] = transfromwgs84.transform(pt)
newLine.setGeometry(QgsGeometry.fromPolylineXY(pointList))
elif feature.geometry().wkbType() == QgsWkbTypes.MultiLineString:
outsegment = []
for line in segments:
pointCount = len(line)
startPt = QgsPointXY(line[0][0], line[0][1])
if self.inLayer.crs() != wgs84crs: # Convert to 4326
startPt = transtowgs84.transform(startPt)
pts = [startPt]
for x in range(1, pointCount):
endPt = QgsPointXY(line[x][0], line[x][1])
if self.inLayer.crs() != wgs84crs: # Convert to 4326
endPt = transtowgs84.transform(endPt)
lineObject = self.geod.InverseLine(startPt.y(), startPt.x(), endPt.y(), endPt.x())
n = int(math.ceil(lineObject.s13 / self.spacing))
if lineObject.s13 > self.spacing:
seglen = lineObject.s13 / n
for i in range(1, n):
s = seglen * i
g = lineObject.Position(s, Geodesic.LATITUDE | Geodesic.LONGITUDE | Geodesic.LONG_UNROLL)
pts.append(QgsPointXY(g['lon2'], g['lat2']))
pts.append(endPt)
startPt = endPt
if self.inLayer.crs() != wgs84crs: # Convert each point back to the output CRS
for x, pt in enumerate(pts):
pts[x] = transfromwgs84.transform(pt)
outsegment.append(pts)
newLine.setGeometry(QgsGeometry.fromMultiPolylineXY(outsegment))
else:
badGeom += 1
newLine.setAttributes(feature.attributes())
pr.addFeatures([newLine])
if badGeom > 0:
self.iface.messageBar().pushWarning("", "{} features failed".format(badGeom))
def densifyPolygon(inLayer, pr):
badGeom = 0
iterator = inLayer.getFeatures()
# create empty feature to write to
newPoly = QgsFeature()
for feature in iterator:
try:
if feature.geometry().wkbType() == QgsWkbTypes.Polygon:
polygon = feature.geometry().asPolygon()
polyCount = len(polygon)
pointList = []
for points in polygon:
pointCount = len(points)
startPt = QgsPointXY(points[0][0], points[0][1])
if self.inLayer.crs() != wgs84crs:
startPt = transtowgs84.transform(startPt)
polyPointList = [startPt]
for i in range(1, pointCount):
endPt = QgsPointXY(points[i][0], points[i][1])
if self.inLayer.crs() != wgs84crs: # Convert to 4326
endPt = transtowgs84.transform(endPt)
lineObject = self.geod.InverseLine(startPt.y(), startPt.x(), endPt.y(), endPt.x())
n = int(math.ceil(lineObject.s13 / self.spacing))
seglen = lineObject.s13 / n
for i in range(1, n):
s = seglen * i
g = lineObject.Position(s, Geodesic.LATITUDE | Geodesic.LONGITUDE | Geodesic.LONG_UNROLL)
polyPointList.append(QgsPointXY(g['lon2'], g['lat2']))
polyPointList.append(endPt)
startPt = endPt
if self.inLayer.crs() != wgs84crs:
for x, pt in enumerate(polyPointList):
polyPointList[x] = transfromwgs84.transform(pt)
outPolygon = QgsFeature()
outPolygon.setGeometry(QgsGeometry.fromPolygonXY([polyPointList]))
outPolygon.setAttributes(feature.attributes())
pr.addFeatures([outPolygon])
elif feature.geometry().wkbType() == QgsWkbTypes.MultiPolygon:
print("multipoly")
multipolygon = feature.geometry().asMultiPolygon()
multiPointList = []
for polygon in multipolygon:
polyPointList = []
for points in polygon:
print('points:', points)
pointCount = len(points)
startPt = QgsPointXY(points[0][0], points[0][1])
print('startPt:', startPt)
if self.inLayer.crs() != wgs84crs:
startPt = transtowgs84.transform(startPt)
polyPointList = [startPt]
for i in range(1, pointCount):
endPt = QgsPointXY(points[i][0], points[i][1])
print('endPt:', endPt)
if self.inLayer.crs() != wgs84crs: # Convert to 4326
endPt = transtowgs84.transform(endPt)
lineObject = self.geod.InverseLine(startPt.y(), startPt.x(), endPt.y(), endPt.x())
n = int(math.ceil(lineObject.s13 / self.spacing))
seglen = lineObject.s13 / n
for i in range(1, n):
s = seglen * i
g = lineObject.Position(s, Geodesic.LATITUDE | Geodesic.LONGITUDE | Geodesic.LONG_UNROLL)
polyPointList.append(QgsPointXY(g['lon2'], g['lat2']))
polyPointList.append(endPt)
startPt = endPt
if self.inLayer.crs() != wgs84crs:
for x, pt in enumerate(polyPointList):
polyPointList[x] = transfromwgs84.transform(pt)
multiPointList.append(polyPointList)
print('multiPointList:', multiPointList)
outMultiPolygon = QgsFeature()
print('setGeometry')
outMultiPolygon.setGeometry(QgsGeometry.fromMultiPolygonXY([multiPointList]))
print('setAttributes')
outMultiPolygon.setAttributes(feature.attributes())
print('addFeatures')
pr.addFeatures([outMultiPolygon])
print('finished')
else:
badGeom += 1
except:
badGeom += 1
if badGeom > 0:
self.iface.messageBar().pushWarning("", "{} features failed".format(badGeom))
if self.create_point:
densifyPoint(self.inLayer, pointPr)
out_point_layer.reload()
if self.create_polyline:
densifyLine(self.inLayer, linePr)
out_line_layer.reload()
if self.create_polygon:
densifyPolygon(self.inLayer, polyPr)
out_poly_layer.reload()
|
# Please note: while I was able to find these constants within the xlwt source code, on my system (using LibreOffice) I was only presented with a solid line, varying from thin to thick; no dotted or dashed lines.
import xlwt
'''workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('My Sheet')
borders = xlwt.Borders() # Create Borders
borders.left = xlwt.Borders.THIN
# DASHED: dashed line
# NO_LINE: no border line
# THIN: thin solid line
# May be: NO_LINE, THIN, MEDIUM, DASHED, DOTTED, THICK, DOUBLE, HAIR, MEDIUM_DASHED, THIN_DASH_DOTTED, MEDIUM_DASH_DOTTED, THIN_DASH_DOT_DOTTED, MEDIUM_DASH_DOT_DOTTED, SLANTED_MEDIUM_DASH_DOTTED, or 0x00 through 0x0D.
borders.right = xlwt.Borders.THIN
borders.top = xlwt.Borders.THIN
borders.bottom = xlwt.Borders.THIN
borders.left_colour = 0x40
borders.right_colour = 0x40
borders.top_colour = 0x40
borders.bottom_colour = 0x40
style = xlwt.XFStyle() # Create Style
style.borders = borders # Add Borders to Style
worksheet.write(0, 0, 'Cell Contents', style)
workbook.save('Excel_Workbook.xls')'''
def site_write_line_style():
borders = xlwt.Borders() # Create Borders
borders.left = xlwt.Borders.NO_LINE
borders.right = xlwt.Borders.NO_LINE
borders.top = xlwt.Borders.NO_LINE
borders.bottom = xlwt.Borders.THIN
borders.left_colour = 0x40
borders.right_colour = 0x40
borders.top_colour = 0x40
borders.bottom_colour = 0x40
style = xlwt.XFStyle() # Create Style
style.borders = borders # Add Borders to Style
return style
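# Hedged usage sketch (not part of the original file): applies the
# bottom-border-only style returned above to a single cell; the file and
# sheet names are illustrative.
def _example_write_with_bottom_border():
    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet('Example Sheet')
    worksheet.write(0, 0, 'Cell Contents', site_write_line_style())
    workbook.save('Example_Workbook.xls')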
|
# Load the training data and preprocess it
import pandas as pd
import itertools
# 1. Sentences written by the first-year annotators
music_by_numbers = pd.read_excel('./music_by_numbers.xlsx', engine='openpyxl')
dance_by_numbers = pd.read_excel('./dance_by_numbers.xlsx', engine='openpyxl')
visual_by_numbers = pd.read_excel('./visual_by_numbers.xlsx', engine='openpyxl')
# 2. Open conversational speech dataset
# talking_data_1 = pd.read_excel('/Users/lifeofpy/Desktop/dataset_청각2/감정 분류를 위한 대화 음성 데이터셋_4차년도.xlsx', engine='openpyxl')
# talking_data_2 = pd.read_excel('/Users/lifeofpy/Desktop/dataset_청각2/감정 분류를 위한 대화 음성 데이터셋_5차년도_1차.xlsx', engine='openpyxl')
# talking_data_3 = pd.read_excel('/Users/lifeofpy/Desktop/dataset_청각2/감정 분류를 위한 대화 음성 데이터셋_5차년도_2차.xlsx', engine='openpyxl')
# 3. Korean ballad lyrics dataset
lyrics_ballad = open('./lyrics_ballad.txt', 'r').read().split('\n')
lyrics_ballad = [[x] for x in lyrics_ballad if x != ''] # drop empty lines (missing values)
# Preprocess the data
music_by_numbers = music_by_numbers.drop(['Unnamed: 0'], axis=1)
dance_by_numbers = dance_by_numbers.drop(['Unnamed: 0'], axis=1)
visual_by_numbers = visual_by_numbers.drop(['Unnamed: 0'], axis=1)
# Convert the DataFrames to lists
def change_df_tolst(df):
word_list = df.values.tolist()
for i in range(len(word_list)):
for j in range(len(word_list[i])):
word_list[i][j] = word_list[i][j].split(", '")
word_list = list(itertools.chain.from_iterable(word_list))
return word_list
music_list = change_df_tolst(music_by_numbers)
dance_list = change_df_tolst(dance_by_numbers)
visual_list = change_df_tolst(visual_by_numbers)
# talking_data_1_list = talking_data_1[['발화문']].values.tolist()
# talking_data_2_list = talking_data_2[['발화문']].values.tolist()
# talking_data_3_list = talking_data_3[['발화문']].values.tolist()
# Merge the data into a single list (2-D list >> 1-D list)
# Tokenize the comment sentences with nltk for Word2Vec model training
# Save the tokens to a file
tokens = music_list + dance_list + visual_list + lyrics_ballad
# Total number of sentence entries
print(len(tokens))
|
from .models import Question, Option
from .serializers import QuestionSerializer, OptionSerializer, UserSerializer
from django.http import Http404
from django.contrib.auth.models import User
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from rest_framework import permissions
from .permissions import IsAuthorOrReadOnly, IsOwnerParent
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from .tasks import sent_email_to_admin
properties = {
"text": openapi.Schema(
type=openapi.TYPE_STRING,
description="Text of the question",
default="question[num]",
),
"description": openapi.Schema(
type=openapi.TYPE_STRING,
description="some additional description",
default="description",
),
}
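# This schema dict is reused by the swagger_auto_schema decorators below to document
# the JSON request body of the question create/update endpoints.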
class QuestionsList(APIView):
"""
List all Questions, or create a new Question.
"""
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
def get(self, request):
questions = Question.objects.all()
serializer = QuestionSerializer(questions, many=True)
return Response(serializer.data)
@swagger_auto_schema(
request_body=openapi.Schema(type=openapi.TYPE_OBJECT, properties=properties)
)
def post(self, request):
request.data["author"] = request.user.id
serializer = QuestionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
# send email to admin (admarin) when creating a new question
sent_email_to_admin.delay(
"INFO",
f"New Question: \"{request.data.get('text')}\" is created by {request.user} user",
"server@example.com",
["admarin@example.com"],
)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class QuestionDetail(APIView):
"""
Retrieve, update or delete a Question instance.
"""
permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsAuthorOrReadOnly]
def get_object(self, pk):
try:
return Question.objects.get(pk=pk)
except Question.DoesNotExist:
raise Http404
def get(self, request, pk):
question = self.get_object(pk)
serializer = QuestionSerializer(question)
return Response(serializer.data)
@swagger_auto_schema(
request_body=openapi.Schema(type=openapi.TYPE_OBJECT, properties=properties)
)
def put(self, request, pk):
question = self.get_object(pk)
request.data["author"] = request.user.id
serializer = QuestionSerializer(question, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
question = self.get_object(pk)
question.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
option_properties = {
"text": openapi.Schema(
type=openapi.TYPE_STRING,
description="Text of the question",
default="question[num]",
)
}
class OptionsList(APIView):
"""
List all Options, or create a new Option.
"""
permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerParent]
def get(self, request, pk):
options = Option.objects.filter(question_id=pk).all()
serializer = OptionSerializer(options, many=True)
return Response(serializer.data)
@swagger_auto_schema(
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT, properties=option_properties
)
)
def post(self, request, pk):
request.data["author"] = request.user.id
question = Question.objects.get(pk=pk)
# use the question id supplied in the request when creating the option, or fall back to the current question's id
request.data["question"] = request.data.get("question", question.id)
serializer = OptionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
sent_email_to_admin.delay(
"INFO",
f"New Option: \"{request.data.get('text')}\" is created by {request.user} user",
"server@example.com",
["admarin@example.com"],
)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class OptionDetail(APIView):
"""
Retrieve, update or delete an Option instance.
"""
permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsAuthorOrReadOnly]
def get_object(self, question_pk, option_pk):
try:
return Option.objects.filter(question_id=question_pk).get(pk=option_pk)
except Option.DoesNotExist:
raise Http404
def get(self, request, question_pk, option_pk):
option = self.get_object(question_pk, option_pk)
serializer = OptionSerializer(option)
return Response(serializer.data)
@swagger_auto_schema(
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT, properties=option_properties
)
)
def put(self, request, question_pk, option_pk):
question = Question.objects.get(pk=question_pk)
# use the question id supplied in the request when updating the option, or fall back to the current question's id
request.data["question"] = request.data.get("question", question.id)
request.data["author"] = request.user.id
option = self.get_object(question_pk, option_pk)
serializer = OptionSerializer(option, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, question_pk, option_pk):
option = self.get_object(question_pk, option_pk)
option.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class UserList(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
|
from __future__ import annotations
from collections import OrderedDict
import torch
from torch.utils.data import Dataset, DataLoader
from torch import Tensor
from functools import partial
from typing import Sequence, Union, Any, List, Iterable, Optional, TypeVar, Dict
from imagenet.definitions_and_structures import DatasetThatSupportsTargets, INCProtocolIterator, INCProtocol, DatasetPart
T_idx = TypeVar('T_idx')
def tensor_as_list(sequence):
if isinstance(sequence, Tensor):
return sequence.tolist()
# Numpy already returns the correct format
# Example list(np.array([1, 2, 3])) returns [1, 2, 3]
# whereas list(torch.tensor([1, 2, 3])) returns [tensor(1), tensor(2), tensor(3)], which is "bad"
return list(sequence)
def tensor_as_set(sequence):
if isinstance(sequence, Tensor):
return set(sequence.tolist())
# Numpy already returns the correct format
# Example list(np.array([1, 2, 3])) returns [1, 2, 3]
# whereas list(torch.tensor([1, 2, 3])) returns [tensor(1), tensor(2), tensor(3)], which is "bad"
return set(sequence)
def __get_indexes_with_patterns_ordered_by_classes(sequence: Sequence[T_idx], search_elements: Sequence[T_idx],
sort_indexes: bool = True, sort_classes: bool = True,
class_mapping: Optional[Tensor] = None) -> Tensor:
# list() handles the situation in which search_elements is a torch.Tensor
# without it: result_per_class[element].append(idx) -> error
# as result_per_class[0] won't exist while result_per_class[tensor(0)] will
result_per_class: Dict[T_idx, List[int]] = OrderedDict()
result: List[int] = []
search_elements = tensor_as_list(search_elements)
sequence = tensor_as_list(sequence)
if class_mapping is not None:
class_mapping = tensor_as_list(class_mapping)
else:
class_mapping = list(range(max(search_elements) + 1))
if sort_classes:
search_elements = sorted(search_elements)
for search_element in search_elements:
result_per_class[search_element] = []
set_search_elements = set(search_elements)
for idx, element in enumerate(sequence):
if class_mapping[element] in set_search_elements:
result_per_class[class_mapping[element]].append(idx)
for search_element in search_elements:
if sort_indexes:
result_per_class[search_element].sort()
result.extend(result_per_class[search_element])
return torch.tensor(result, dtype=torch.int)
def __get_indexes_without_class_bucketing(sequence: Sequence[T_idx], search_elements: Sequence[T_idx],
sort_indexes: bool = False, class_mapping: Optional[Tensor] = None) -> Tensor:
sequence = tensor_as_list(sequence)
result: List[T_idx] = []
if class_mapping is not None:
class_mapping = tensor_as_list(class_mapping)
else:
class_mapping = list(range(max(search_elements) + 1))
search_elements = tensor_as_set(search_elements)
for idx, element in enumerate(sequence):
if class_mapping[element] in search_elements:
result.append(idx)
if sort_indexes:
result.sort()
return torch.tensor(result, dtype=torch.int)
def get_indexes_from_set(sequence: Sequence[T_idx], search_elements: Sequence[T_idx], bucket_classes: bool = True,
sort_classes: bool = False, sort_indexes: bool = False,
class_mapping: Optional[Tensor] = None) -> Tensor:
if bucket_classes:
return __get_indexes_with_patterns_ordered_by_classes(sequence, search_elements, sort_indexes=sort_indexes,
sort_classes=sort_classes, class_mapping=class_mapping)
else:
return __get_indexes_without_class_bucketing(sequence, search_elements, sort_indexes=sort_indexes,
class_mapping=class_mapping)
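# Hedged illustration (not part of the original module), with made-up data:
# get_indexes_from_set([1, 0, 2, 0], [0, 2], bucket_classes=True, sort_classes=True)
# gathers the matching positions per class -> tensor([1, 3, 2]) (class 0 first, then class 2),
# while bucket_classes=False keeps them in encounter order -> tensor([1, 2, 3]).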
class ListsDataset(Dataset):
"""
A Dataset that applies transformations before returning patterns/targets
Also, this Dataset supports slicing
"""
def __init__(self, patterns, targets, transform=None, target_transform=None):
super().__init__()
self.transform = transform
self.target_transform = target_transform
self.patterns = patterns
self.targets = targets
def __getitem__(self, idx):
patterns: List[Any] = []
labels: List[Tensor] = []
indexes_iterator: Iterable[int]
treat_as_tensors: bool = True
# Makes dataset sliceable
if isinstance(idx, slice):
indexes_iterator = range(*idx.indices(len(self.patterns)))
elif isinstance(idx, int):
indexes_iterator = [idx]
else: # Should handle other types (ndarray, Tensor, Sequence, ...)
if hasattr(idx, 'shape') and len(getattr(idx, 'shape')) == 0: # Manages 0-d ndarray / Tensor
indexes_iterator = [int(idx)]
else:
indexes_iterator = idx
for single_idx in indexes_iterator:
pattern, label = self.__get_single_item(single_idx)
if not isinstance(pattern, Tensor):
treat_as_tensors = False
#pattern = pattern.unsqueeze(0)
label = torch.as_tensor(label)
#label = label.unsqueeze(0)
patterns.append(pattern)
labels.append(label)
if len(patterns) == 1:
if treat_as_tensors:
patterns[0] = patterns[0].squeeze(0)
labels[0] = labels[0].squeeze(0)
return patterns[0], labels[0]
else:
labels_cat = torch.cat([single_label.unsqueeze(0) for single_label in labels])
if treat_as_tensors:
patterns_cat = torch.cat([single_pattern.unsqueeze(0) for single_pattern in patterns])
return patterns_cat, labels_cat
else:
return patterns, labels_cat
def __len__(self):
return len(self.patterns)
def __get_single_item(self, idx: int):
pattern, label = self.patterns[idx], self.targets[idx]
if self.transform is not None:
pattern = self.transform(pattern)
if self.target_transform is not None:
label = self.target_transform(label)
return pattern, label
class TransformationDataset(Dataset, DatasetThatSupportsTargets):
"""
A Dataset that applies transformations before returning patterns/targets
Also, this Dataset supports slicing
"""
def __init__(self, dataset: DatasetThatSupportsTargets, transform=None, target_transform=None):
super().__init__()
self.transform = transform
self.target_transform = target_transform
self.dataset = dataset
self.preloaded_data = None
self.targets = dataset.targets
def __getitem__(self, idx):
patterns: List[Any] = []
labels: List[Tensor] = []
indexes_iterator: Iterable[int]
treat_as_tensors: bool = True
# Makes dataset sliceable
if isinstance(idx, slice):
indexes_iterator = range(*idx.indices(len(self.dataset)))
elif isinstance(idx, int):
indexes_iterator = [idx]
else: # Should handle other types (ndarray, Tensor, Sequence, ...)
if hasattr(idx, 'shape') and len(getattr(idx, 'shape')) == 0: # Manages 0-d ndarray / Tensor
indexes_iterator = [int(idx)]
else:
indexes_iterator = idx
for single_idx in indexes_iterator:
pattern, label = self.__get_single_item(single_idx)
if not isinstance(pattern, Tensor):
treat_as_tensors = False
#pattern = pattern.unsqueeze(0)
label = torch.as_tensor(label)
#label = label.unsqueeze(0)
patterns.append(pattern)
labels.append(label)
if len(patterns) == 1:
if treat_as_tensors:
patterns[0] = patterns[0].squeeze(0)
labels[0] = labels[0].squeeze(0)
return patterns[0], labels[0]
else:
labels_cat = torch.cat([single_label.unsqueeze(0) for single_label in labels])
if treat_as_tensors:
patterns_cat = torch.cat([single_pattern.unsqueeze(0) for single_pattern in patterns])
return patterns_cat, labels_cat
else:
return patterns, labels_cat
def __len__(self):
return len(self.dataset)
def preload_data(self, num_workers=4, batch_size=50):
if self.preloaded_data is not None:
return self
self.preloaded_data = []
patterns_loader = DataLoader(self.dataset, num_workers=num_workers,
shuffle=False, drop_last=False, batch_size=batch_size)
for patterns in patterns_loader: # patterns is a tuple patterns_x, patterns_y, ... = patterns
for pattern_idx in range(len(patterns[0])): # patterns[0] is patterns_x, this means that len(patterns[0]) == patterns_x.shape[0]
pattern_data = tuple()
for pattern_element_idx in range(len(patterns)):
pattern_data += (patterns[pattern_element_idx][pattern_idx],)
self.preloaded_data.append(pattern_data)
return self
def __get_single_item(self, idx: int):
if self.preloaded_data is not None:
return self.preloaded_data[idx]
# print(type(self.dataset))
pattern, label = self.dataset[idx]
# print(type(pattern))
# print(self.transform)
if self.transform is not None:
# print("asddddd")
# print(self.transform)
# print(pattern)
pattern = self.transform(pattern)
if self.target_transform is not None:
label = self.target_transform(label)
return pattern, label
class LazyClassMapping(Sequence[int]):
"""
Defines a lazy targets mapping.
This class is used when one needs to lazily populate a targets field whose
elements must be filtered out (when subsetting, see
:class:`torch.utils.data.Subset`) and/or transformed (based on some
mapping). This allows for more efficient memory usage, as the mapping is
applied on the fly instead of actually allocating a new list.
"""
def __init__(self, targets: Sequence[int],
indices: Union[Sequence[int], None],
mapping: Optional[Sequence[int]] = None):
self._targets = targets
self._mapping = mapping
self._indices = indices
def __len__(self):
if self._indices is None:
return len(self._targets)
return len(self._indices)
def __getitem__(self, item_idx) -> int:
if self._indices is not None:
subset_idx = self._indices[item_idx]
else:
subset_idx = item_idx
if self._mapping is not None:
return self._mapping[self._targets[subset_idx]]
return self._targets[subset_idx]
def __str__(self):
return '[' + \
', '.join([str(self[idx]) for idx in range(len(self))]) + \
']'
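# Hedged illustration (not part of the original module), with made-up values:
# targets = [5, 7, 5]; indices = [0, 2]; mapping = [0, 0, 0, 0, 0, 0, 0, 1]  (class 5 -> 0, class 7 -> 1)
# LazyClassMapping(targets, indices, mapping=mapping) then behaves like the list [0, 0]
# without ever materialising a remapped copy of targets.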
class TransformationSubset(Dataset):
def __init__(self, dataset: DatasetThatSupportsTargets, indices: Sequence[int],
transform=None, target_transform=None, class_mapping: Optional[Tensor] = None):
super().__init__()
self.dataset = TransformationDataset(dataset, transform=transform, target_transform=target_transform)
self.indices = indices
self.class_mapping = class_mapping
self.targets = LazyClassMapping(dataset.targets, indices,
mapping=class_mapping)
def __getitem__(self, idx) -> (Tensor, Tensor):
result = self.dataset[self.indices[idx]]
if self.class_mapping is not None:
return result[0], self.class_mapping[result[1]]
return result
def __len__(self) -> int:
return len(self.indices)
def make_transformation_subset(dataset: DatasetThatSupportsTargets, transform: Any, target_transform: Any,
class_mapping: Tensor, classes: Sequence[int],
bucket_classes=False, sort_classes=False, sort_indexes=False):
return TransformationSubset(dataset, get_indexes_from_set(dataset.targets, classes,
bucket_classes=bucket_classes,
sort_classes=sort_classes,
sort_indexes=sort_indexes,
class_mapping=class_mapping),
transform=transform, target_transform=target_transform, class_mapping=class_mapping)
class NCProtocol:
def __init__(self, train_dataset: DatasetThatSupportsTargets, test_dataset: DatasetThatSupportsTargets,
n_tasks: int, shuffle: bool = True, seed: Optional[int] = None,
train_transform=None, train_target_transform=None, test_transform=None, test_target_transform=None,
steal_transforms_from_datasets: bool = True,
fixed_class_order: Optional[Sequence[int]] = None, per_task_classes: Optional[Dict[int, int]] = None,
remap_class_indexes_in_ascending_order: bool = False):
self.train_dataset: DatasetThatSupportsTargets = train_dataset
self.test_dataset: DatasetThatSupportsTargets = test_dataset
self.validation_dataset: DatasetThatSupportsTargets = train_dataset
self.n_tasks: int = n_tasks
self.classes_order: Tensor = torch.unique(torch.tensor(train_dataset.targets))
self.train_transform = train_transform
self.train_target_transform = train_target_transform
self.test_transform = test_transform
self.test_target_transform = test_target_transform
self.remap_class_indexes_in_ascending_order = remap_class_indexes_in_ascending_order
self.n_classes = len(self.classes_order)
if n_tasks < 1:
raise ValueError('Invalid task number (n_tasks parameter): must be greater than 0')
if per_task_classes is not None:
if max(per_task_classes.keys()) >= n_tasks:
raise ValueError('Invalid task id in per_task_classes parameter: '
+ str(max(per_task_classes.keys())) + ': task ids must be in range [0, n_tasks)')
if sum(per_task_classes.values()) > self.n_classes:
raise ValueError('Insufficient number of classes: classes mapping defined in '
'per_task_classes parameter can\'t be satisfied')
if (self.n_classes - sum(per_task_classes.values())) % (n_tasks - len(per_task_classes)) > 0:
raise ValueError('Invalid number of tasks: classes contained in dataset cannot be divided by n_tasks')
default_per_task_classes = (self.n_classes - sum(per_task_classes.values())) // \
(n_tasks - len(per_task_classes))
self.classes_per_task: List[int] = [default_per_task_classes] * n_tasks
for task_id in per_task_classes:
self.classes_per_task[task_id] = per_task_classes[task_id]
else:
if self.n_classes % n_tasks > 0:
raise ValueError('Invalid number of tasks: classes contained in dataset cannot be divided by n_tasks')
self.classes_per_task: List[int] = [self.n_classes // n_tasks] * n_tasks
if fixed_class_order is not None:
self.classes_order = torch.tensor(fixed_class_order)
elif shuffle:
if seed is not None:
torch.random.manual_seed(seed)
self.classes_order = self.classes_order[torch.randperm(len(self.classes_order))]
if steal_transforms_from_datasets:
if hasattr(train_dataset, 'transform'):
self.train_transform = train_dataset.transform
train_dataset.transform = None
if hasattr(train_dataset, 'target_transform'):
self.train_target_transform = train_dataset.target_transform
train_dataset.target_transform = None
if hasattr(test_dataset, 'transform'):
self.test_transform = test_dataset.transform
test_dataset.transform = None
if hasattr(test_dataset, 'target_transform'):
self.test_target_transform = test_dataset.target_transform
test_dataset.target_transform = None
def __iter__(self) -> INCProtocolIterator:
return NCProtocolIterator(self)
def get_task_classes(self, task_id: int) -> Tensor:
classes_start_idx = sum(self.classes_per_task[:task_id])
classes_end_idx = classes_start_idx + self.classes_per_task[task_id]
if not self.remap_class_indexes_in_ascending_order:
return self.classes_order[classes_start_idx:classes_end_idx]
else:
return torch.arange(classes_start_idx, classes_end_idx, dtype=torch.long)
def get_task_classes_mapping(self) -> Tensor:
if not self.remap_class_indexes_in_ascending_order:
return torch.tensor(list(range(self.n_classes)))
classes_order_as_list = self.classes_order.tolist() # no index() method in Tensor :'(
return torch.tensor([classes_order_as_list.index(class_idx) for class_idx in range(self.n_classes)])
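# When remap_class_indexes_in_ascending_order is set, the mapping above sends each
# original class label to its position in classes_order, so the labels exposed to the
# tasks become contiguous ascending indexes; otherwise it is the identity mapping.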
class NCProtocolIterator:
def __init__(self, protocol: INCProtocol,
swap_train_test_transformations: bool = False,
are_transformations_disabled: bool = False,
initial_current_task: int = -1):
self.current_task: int = -1
self.protocol: INCProtocol = protocol
self.are_train_test_transformations_swapped = swap_train_test_transformations
self.are_transformations_disabled = are_transformations_disabled
self.classes_seen_so_far: Tensor = torch.empty(0, dtype=torch.long)
self.classes_in_this_task: Tensor = torch.empty(0, dtype=torch.long)
self.prev_classes: Tensor = torch.empty(0, dtype=torch.long)
self.future_classes: Tensor = torch.empty(0, dtype=torch.long)
classes_mapping = self.protocol.get_task_classes_mapping()
if self.are_transformations_disabled:
self.train_subset_factory = partial(make_transformation_subset, self.protocol.train_dataset,
None, None, classes_mapping)
self.test_subset_factory = partial(make_transformation_subset, self.protocol.test_dataset,
None, None, classes_mapping)
else:
if self.are_train_test_transformations_swapped:
self.train_subset_factory = partial(make_transformation_subset, self.protocol.train_dataset,
self.protocol.test_transform, self.protocol.test_target_transform,
classes_mapping)
self.test_subset_factory = partial(make_transformation_subset, self.protocol.test_dataset,
self.protocol.train_transform, self.protocol.train_target_transform,
classes_mapping)
else:
self.train_subset_factory = partial(make_transformation_subset, self.protocol.train_dataset,
self.protocol.train_transform, self.protocol.train_target_transform,
classes_mapping)
self.test_subset_factory = partial(make_transformation_subset, self.protocol.test_dataset,
self.protocol.test_transform, self.protocol.test_target_transform,
classes_mapping)
for _ in range(initial_current_task+1):
self.__go_to_next_task()
def __next__(self) -> (DatasetThatSupportsTargets, INCProtocolIterator):
self.__go_to_next_task()
return self.get_current_training_set(), self
# Training set utils
def get_current_training_set(self, bucket_classes=False, sort_classes=False, sort_indexes=False) \
-> DatasetThatSupportsTargets:
return self.train_subset_factory(self.classes_in_this_task, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_task_training_set(self, task_id: int, bucket_classes=False, sort_classes=False,
sort_indexes=False) -> DatasetThatSupportsTargets:
classes_in_required_task = self.protocol.get_task_classes(task_id)
return self.train_subset_factory(classes_in_required_task, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_cumulative_training_set(self, include_current_task: bool = True, bucket_classes=False, sort_classes=False,
sort_indexes=False) -> DatasetThatSupportsTargets:
if include_current_task:
return self.train_subset_factory(self.classes_seen_so_far, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
else:
return self.train_subset_factory(self.prev_classes, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_complete_training_set(self, bucket_classes=False, sort_classes=False, sort_indexes=False) \
-> DatasetThatSupportsTargets:
return self.train_subset_factory(self.protocol.classes_order, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_future_training_set(self, bucket_classes=False, sort_classes=False, sort_indexes=False) \
-> DatasetThatSupportsTargets:
return self.train_subset_factory(self.future_classes, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_training_set_part(self, dataset_part: DatasetPart, bucket_classes=False, sort_classes=False,
sort_indexes=False) -> DatasetThatSupportsTargets:
if dataset_part == DatasetPart.CURRENT_TASK:
return self.get_current_training_set(bucket_classes=bucket_classes, sort_classes=sort_classes,
sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.CUMULATIVE:
return self.get_cumulative_training_set(include_current_task=True, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.OLD:
return self.get_cumulative_training_set(include_current_task=False, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.FUTURE:
return self.get_future_training_set(bucket_classes=bucket_classes, sort_classes=sort_classes,
sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.COMPLETE_SET:
return self.get_complete_training_set(bucket_classes=bucket_classes, sort_classes=sort_classes,
sort_indexes=sort_indexes)
else:
raise ValueError('Unsupported dataset part')
# Test set utils
def get_current_test_set(self, bucket_classes=False, sort_classes=False, sort_indexes=False) \
-> DatasetThatSupportsTargets:
return self.test_subset_factory(self.classes_in_this_task, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_cumulative_test_set(self, include_current_task: bool = True, bucket_classes=False, sort_classes=False,
sort_indexes=False) -> DatasetThatSupportsTargets:
if include_current_task:
return self.test_subset_factory(self.classes_seen_so_far, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
else:
return self.test_subset_factory(self.prev_classes, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_task_test_set(self, task_id: int, bucket_classes=False, sort_classes=False, sort_indexes=False) \
-> DatasetThatSupportsTargets:
classes_in_required_task = self.protocol.get_task_classes(task_id)
return self.test_subset_factory(classes_in_required_task, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_complete_test_set(self, bucket_classes=False, sort_classes=False, sort_indexes=False) \
-> DatasetThatSupportsTargets:
return self.test_subset_factory(self.protocol.classes_order, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_future_test_set(self, bucket_classes=False, sort_classes=False, sort_indexes=False) \
-> DatasetThatSupportsTargets:
return self.test_subset_factory(self.future_classes, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
def get_test_set_part(self, dataset_part: DatasetPart, bucket_classes=False, sort_classes=False,
sort_indexes=False) \
-> DatasetThatSupportsTargets:
if dataset_part == DatasetPart.CURRENT_TASK:
return self.get_current_test_set(bucket_classes=bucket_classes, sort_classes=sort_classes,
sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.CUMULATIVE:
return self.get_cumulative_test_set(include_current_task=True, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.OLD:
return self.get_cumulative_test_set(include_current_task=False, bucket_classes=bucket_classes,
sort_classes=sort_classes, sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.FUTURE:
return self.get_future_test_set(bucket_classes=bucket_classes, sort_classes=sort_classes,
sort_indexes=sort_indexes)
elif dataset_part == DatasetPart.COMPLETE_SET:
return self.get_complete_test_set(bucket_classes=bucket_classes, sort_classes=sort_classes,
sort_indexes=sort_indexes)
else:
raise ValueError('Unsupported dataset part')
# Transformation utility function. Useful if you want to test on the training set (using test transformations)
def swap_transformations(self) -> INCProtocolIterator:
return NCProtocolIterator(self.protocol,
swap_train_test_transformations=not self.are_train_test_transformations_swapped,
are_transformations_disabled=self.are_transformations_disabled,
initial_current_task=self.current_task)
def disable_transformations(self) -> INCProtocolIterator:
return NCProtocolIterator(self.protocol,
swap_train_test_transformations=self.are_train_test_transformations_swapped,
are_transformations_disabled=True,
initial_current_task=self.current_task)
def enable_transformations(self) -> INCProtocolIterator:
return NCProtocolIterator(self.protocol,
swap_train_test_transformations=self.are_train_test_transformations_swapped,
are_transformations_disabled=False,
initial_current_task=self.current_task)
def __get_tasks_classes(self, task_start: int, task_end: int = -1):
if task_end < 0:
task_end = self.protocol.n_tasks
all_classes = []
for task_idx in range(task_start, task_end):
all_classes.append(self.protocol.get_task_classes(task_idx))
if len(all_classes) == 0:
return torch.tensor([], dtype=torch.long)
return torch.cat(all_classes)
def __go_to_next_task(self):
if self.current_task == (self.protocol.n_tasks - 1):
raise StopIteration()
self.current_task += 1
classes_start_idx = sum(self.protocol.classes_per_task[:self.current_task])
classes_end_idx = classes_start_idx + self.protocol.classes_per_task[self.current_task]
self.classes_in_this_task = self.protocol.get_task_classes(self.current_task)
self.prev_classes = self.classes_seen_so_far
self.future_classes = self.__get_tasks_classes(classes_end_idx)
self.classes_seen_so_far = torch.cat([self.classes_seen_so_far, self.classes_in_this_task])
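# --- Hedged usage sketch (not part of the original module; class and argument names are
# assumed from the INCProtocol/NCProtocolIterator type hints above) ---
#
#   protocol = NCProtocol(train_dataset, test_dataset, n_tasks=5, shuffle=True, seed=1234)
#   for train_subset, task_info in protocol:
#       # train_subset: training data restricted to the classes of the current task
#       # task_info: the NCProtocolIterator itself (see __next__ above)
#       current_test = task_info.get_current_test_set()
#       cumulative_test = task_info.get_cumulative_test_set()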
|
import numpy as np
from typing import Union, List, Dict, Iterable, Tuple, Callable
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy.special as scsp
class Mesh:
def __init__(self, mesh_length=500, mesh_width=500, cellsize=2.e-6, mixed=False, label='None'):
self.width = mesh_width
self.length = mesh_length
self.cell_size = cellsize
self._mixed = mixed
self._name = label
self._mats = ['mat {}'.format(i+1) for i in range(9)]
self._mesh = np.array([[{'i': i,
'j': j,
'mat 1': 0.,
'mat 2': 0.,
'mat 3': 0.,
'mat 4': 0.,
'mat 5': 0.,
'mat 6': 0.,
'mat 7': 0.,
'mat 8': 0.,
'mat 9': 0.,
'vel i': 0.,
'vel j': 0.} for i in range(self.width)]
for j in range(self.length)])
def __repr__(self):
return '<Mesh: {} ({} x {})>'.format(self.name, self.width, self.length)
def plot(self, view: bool = True, save: bool = False, file_name: str = None):
if file_name is None:
file_name = './{}.png'.format(self.name)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
ax.set_title(self.name)
materials = {'mat {}'.format(i+1): np.zeros((self.width, self.length)) for i in range(9)}
for cell in self._mesh.flatten():
for i, mat in enumerate(self._mats):
materials[mat][cell['i'], cell['j']] = cell[mat]*(i+1)
x = np.linspace(0, self.width*self.cell_size, int(self.width))
y = np.linspace(0, self.length*self.cell_size, int(self.length))
xx, yy = np.meshgrid(x, y)
for mat_mesh in materials.values():
mat_mesh = np.ma.masked_where(mat_mesh == 0., mat_mesh)
im = ax.pcolormesh(xx, yy, mat_mesh, vmin=1, vmax=9)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(im, cax=cax)
cb.set_label('Material #')
if save:
fig.savefig(file_name)
if view:
plt.show()
def save(self, file_name: str = None, compress: bool = True):
if file_name is None:
file_name = 'meso_m_{}.iSALE'.format(self.name)
if compress:
file_name += '.gz'
num_cells = self._mesh.size
columns = {'i': np.zeros((num_cells)),
'j': np.zeros((num_cells)),
'vel i': np.zeros((num_cells)),
'vel j': np.zeros((num_cells))}
columns.update({'mat {}'.format(i+1): np.zeros((num_cells)) for i in range(9)})
for i, cell in enumerate(self._mesh.flatten()):
columns['i'][i] = cell['i']
columns['j'][i] = cell['j']
columns['vel i'][i] = cell['vel i']
columns['vel j'][i] = cell['vel j']
for j in range(9):
mat = 'mat {}'.format(j+1)
columns[mat][i] = cell[mat]
all_ = np.column_stack((columns['i'],
columns['j'],
columns['vel i'],
columns['vel j'],
*[columns['mat {}'.format(i+1)] for i in range(9)]))
head = '{}, {}'.format(num_cells, 9)
np.savetxt(file_name, all_, header=head, fmt='%5.3f', comments='')
@property
def contains_mixed_cells(self):
return self._mixed
@property
def name(self):
return self._name
def assign_velocity_to_rectangle(self, velocity: Tuple[float, float],
xlims: Tuple[float, float],
ylims: Tuple[float, float]):
for point in points_within_rectangle(self.cell_size, xlims, ylims):
cell = self._mesh[point]
self.add_velocity_to_cell(velocity, cell)
def assign_velocity_to_material(self, velocity: Tuple[float, float], material: int):
assert 0 < material <= 9, "material must be an integer between 1 and 9"
mat = 'mat {}'.format(material)
for cell in self._mesh.flatten():
if cell[mat] > 0.:
self.add_velocity_to_cell(velocity, cell)
def insert_rectangle(self, material: int, xlims: Tuple[float, float], ylims: Tuple[float, float], ):
assert 0 <= material <= 9, "material must be an integer between 0 and 9 (0 = Void)"
for point in points_within_rectangle(self.cell_size, xlims, ylims):
cell = self._mesh[point]
self.add_material_to_cell(material, cell)
def insert_circle(self, material: int, centre: Tuple[float, float], radius: float):
assert 0 <= material <= 9, "material must be an integer between 0 and 9 (0 = Void)"
for point in points_within_circle(self.cell_size, centre, radius):
cell = self._mesh[point]
self.add_material_to_cell(material, cell)
def insert_ellipse(self,
material: int,
centre: Tuple[float, float],
equivalent_radius: float,
eccentricity: float,
rotation: float = None):
assert 0 <= eccentricity < 1., "Eccentricity must reside on the interval [0, 1)"
assert 0 <= material <= 9, "material must be an integer between 0 and 9 (0 = Void)"
for point in points_within_ellipse(self.cell_size, centre, equivalent_radius, eccentricity, rotation):
cell = self._mesh[point]
self.add_material_to_cell(material, cell)
def add_material_to_cell(self, material: int, cell: Dict[str, Union[int, float]]):
if material == 0:
for mat in self._mats:
cell[mat] = 0.
cell['vel i'] = 0.
cell['vel j'] = 0.
else:
current_mat_fraction = sum([cell[m] for m in self._mats])
fraction_to_add = 1. - current_mat_fraction
if fraction_to_add > 0.:
cell['mat {}'.format(material)] += fraction_to_add
@staticmethod
def add_velocity_to_cell(velocity: Tuple[float, float], cell):
cell['vel i'] = velocity[0]
cell['vel j'] = velocity[1]
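# --- Hedged usage sketch of Mesh (not part of the original module; geometry and velocity
# values are illustrative) ---
#
#   mesh = Mesh(mesh_length=200, mesh_width=200, cellsize=2.e-6, label='demo')
#   mesh.insert_circle(material=1, centre=(200.e-6, 200.e-6), radius=50.e-6)
#   mesh.assign_velocity_to_material((0., -500.), material=1)
#   mesh.plot(view=False, save=True)   # writes ./demo.png
#   mesh.save()                        # writes meso_m_demo.iSALE.gz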
class SizeDistribution:
"""
A size distribution is typically represented by a CDF (cumulative distribution function).
This class creates one with user-specified CDF. CDFs are of the form 'frequency' vs 'var'
and in granular distributions the independent variable is typically krumbein phi, or radius,
however this class allows other types. 'frequency' is often volume (area in 2D) or weight.
Both options are available, as is pure dimensionless frequency. Phi and area are the defaults.
"""
    def __init__(self, name: str):
        self.name = name
        self._type = None
self._mean = None
self._std = None
self._median = None
self._mode = None
self._variance = None
self._skew = None
self._cdf = None
self._limits = None
self._lambda = None
self._k = None
@classmethod
def custom_distribution(cls, name: str, func: Callable):
new = cls(name)
new._func = func
new._cdf = func
new._type = 'custom'
return new
@classmethod
def uniform_distribution(cls, name: str, size_limits: Tuple[float, float]):
uniform = cls(name)
uniform._type = 'uniform'
uniform._limits = size_limits
uniform._mean = .5*(sum(size_limits))
uniform._median = uniform._mean
uniform._variance = (1. / 12.) * (size_limits[1] - size_limits[0]) ** 2.
uniform._cdf = uniform._uniform
uniform._skew = 0.
return uniform
@classmethod
def normal_distribution(cls, name: str, mean: float, standard_deviation: float):
normal = cls(name)
normal._type = 'normal'
normal._mean = mean
normal._std = standard_deviation
normal._median = mean
normal._mode = mean
normal._variance = standard_deviation**2.
normal._skew = 0.
normal._cdf = normal._normal
return normal
@classmethod
def lognormal_distribution(cls, name: str, mu: float, standard_deviation: float):
lognormal = cls(name)
lognormal._type = 'lognormal'
lognormal._mean = np.exp(mu + 0.5 * standard_deviation ** 2.)
lognormal._std = standard_deviation
lognormal._median = np.exp(mu)
lognormal._mode = np.exp(mu - standard_deviation ** 2.)
lognormal._variance = (np.exp(standard_deviation ** 2.) - 1.) * np.exp(2. * mu + standard_deviation ** 2.)
lognormal._skew = (np.exp(standard_deviation ** 2.) + 2.) * np.sqrt(np.exp(standard_deviation ** 2.) - 1.)
lognormal._cdf = lognormal._lognormal
return lognormal
@classmethod
def weibull2_distribution(cls, name: str, scale_parameter: float, shape_parameter: float):
assert 0. <= scale_parameter, "the scale parameter must be >= 0, not {:2.2f}".format(scale_parameter)
assert 0. <= shape_parameter, "the shape parameter must be >= 0, not {:2.2f}".format(shape_parameter)
weibull2 = cls(name)
weibull2._type = 'weibull2'
weibull2._lambda = scale_parameter
weibull2._k = shape_parameter
weibull2._mean = scale_parameter * scsp.gamma(1. + 1. / shape_parameter)
weibull2._median = scale_parameter * (np.log(2.)) ** (1. / shape_parameter)
if shape_parameter > 1:
weibull2._mode = scale_parameter * ((shape_parameter - 1) / shape_parameter) ** (1. / shape_parameter)
else:
weibull2._mode = 0
weibull2._variance = (scale_parameter ** 2.) * \
(scsp.gamma(1. + 2. / shape_parameter) - (scsp.gamma(1. + 1. / shape_parameter)) ** 2.)
weibull2._skew = (scsp.gamma(1. + 3. / shape_parameter) * scale_parameter ** 3. -
3. * weibull2._mean * weibull2._variance - weibull2._mean ** 3.)
weibull2._skew /= weibull2._variance ** (3. / 2.)
weibull2._cdf = weibull2._weibull2
weibull2._type = 'weibull2'
return weibull2
def details(self):
deets = "distribution has the following properties:\n"
deets += "type: {}\n".format(self._type)
deets += "mean = {:2.3f}\n".format(self._mean)
deets += "median = {:2.3f}\n".format(self._median)
deets += "mode = {:2.3f}\n".format(self._mode)
deets += "variance = {:2.3f}\n".format(self._variance)
deets += "skewness = {:2.3f}\n".format(self._skew)
return deets
def frequency(self, x: float, dx: Tuple[float, float]):
"""
        Integrates the probability density function of the chosen distribution to return an estimated frequency.
        Limits MUST be provided in the form of dx, which allows for uneven limits and is always applied as + and -
        the given value of x. Returns the probability over the interval [x - dx[0], x + dx[1]]; this must be
        converted to a useful value outside of the function.
"""
if self._type == 'lognormal':
assert x >= 0., "ERROR: Lognormal distribution only works for input greater than 0"
f = np.float64(abs(self._cdf(x + dx[1]) - self._cdf(x - dx[0])))
return f
def _uniform(self, x: float):
"""
        CDF for a uniform probability density function between min_x and max_x
"""
assert self._limits is not None
min_x = self._limits[0]
max_x = self._limits[1]
f = (x - min_x) / (max_x - min_x)
if x < min_x:
f = 0.
elif x >= max_x:
f = 1.
return f
def _normal(self, x: float):
"""
CDF for a normal probability density function centred on mu with std sigma
"""
mu = self._mean
sigma = self._std
f = .5 * (1. + scsp.erf((x - mu) / (sigma * np.sqrt(2.))))
return f
def _lognormal(self, x: float):
"""
CDF for a log-normal probability density function centred on mu with std sigma
"""
mu = self._mean
sigma = self._std
f = .5 + .5 * scsp.erf((np.log(x) - mu) / (sigma * np.sqrt(2.)))
return f
def _weibull2(self, x: float):
"""
CDF for a Weibull 2-parameter distribution; lambda is the 'scale' of the distribution
k is the 'shape'. This distribution is typically used for PSDs generated by
grinding, milling, and crushing operations.
"""
assert self._lambda is not None
assert self._k is not None
lamb = self._lambda
k = self._k
if x >= 0:
f = 1. - np.exp(-(x / lamb) ** k)
else:
f = 0.
return f
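# --- Hedged usage sketch of SizeDistribution (not part of the original module; parameter
# values are illustrative) ---
#
#   psd = SizeDistribution.normal_distribution('demo', mean=50.e-6, standard_deviation=10.e-6)
#   print(psd.details())
#   # estimated fraction of grains with size 50 um +/- 5 um:
#   frac = psd.frequency(50.e-6, (5.e-6, 5.e-6))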
class Ensemble:
def __init__(self, name: str, host_mesh: Mesh, size_distribution: SizeDistribution):
self.name = name
self._host = host_mesh
self._dist = size_distribution
def rotate_point(x: float, y: float, rot: float):
xct = x * np.cos(rot) - y * np.sin(rot)
yct = x * np.sin(rot) + y * np.cos(rot)
return xct, yct
def points_within_rectangle(cell_size: float, xlims: Tuple[float, float], ylims: Tuple[float, float]):
# points do not have to snap to the grid
xmin = xlims[0]/cell_size
xmax = xlims[1]/cell_size
ymin = ylims[0]/cell_size
ymax = ylims[1]/cell_size
imin, imax = int(xmin)-1, int(xmax)+1
jmin, jmax = int(ymin)-1, int(ymax)+1
valid_points = [(i, j)
for i in range(imin, imax)
for j in range(jmin, jmax)
if (xmin <= i+.5 <= xmax) and (ymin <= j+.5 <= ymax)]
return valid_points
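# Worked example (sketch, not part of the original module): with cell_size = 1.e-6 and a
# 3 um x 2 um rectangle anchored at the origin, only cells whose centres fall inside the
# rectangle are returned:
#   points_within_rectangle(1.e-6, (0., 3.e-6), (0., 2.e-6))
#   -> [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]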
def points_within_circle(cell_size: float, centre: Tuple[float, float], radius: float):
# points do not have to snap to the grid
radius = (radius/cell_size)
rad2 = radius**2.
xcentre = centre[0]/cell_size
ycentre = centre[1]/cell_size
imin, imax = int(xcentre-1.1*radius), int(xcentre+1.1*radius)
jmin, jmax = int(ycentre-1.1*radius), int(ycentre+1.1*radius)
valid_points = [(i, j)
for i in range(imin, imax)
for j in range(jmin, jmax)
if ((i+.5-xcentre)**2. + (j+.5-ycentre)**2. <= rad2)]
return valid_points
def points_within_ellipse(cell_size: float,
centre: Tuple[float, float],
equivalent_radius: float,
eccentricity: float,
rotation: float = None):
# A is the semi-major radius, B is the semi-minor radius
semi_major = equivalent_radius/((1. - eccentricity ** 2.) ** .25)
semi_minor = semi_major * np.sqrt(1. - eccentricity ** 2.)
semi_major /= cell_size
semi_minor /= cell_size
xcentre = centre[0]/cell_size
ycentre = centre[1]/cell_size
imin, imax = int(xcentre-1.5*semi_major), int(xcentre+1.5*semi_major)
jmin, jmax = int(ycentre-1.5*semi_major), int(ycentre+1.5*semi_major)
valid_points = []
for i in range(imin, imax):
for j in range(jmin, jmax):
if rotation is not None:
xc, yc = rotate_point(i+.5-xcentre, j+.5-ycentre, rotation)
else:
xc, yc = i+.5-xcentre, j+.5-ycentre
if (xc/semi_major)**2. + (yc/semi_minor)**2. <= 1.:
valid_points.append((i, j))
return valid_points
def translate_vertices(vertices: List[Tuple[float, float]],
new_centroid: Tuple[float, float],
old_centroid: Tuple[float, float] = (0., 0.)):
displacement = (new_centroid[0]-old_centroid[0], new_centroid[1]-old_centroid[1])
new_vertices = [(x+displacement[0], y+displacement[1]) for x, y in vertices]
return new_vertices
def points_within_polygon(cell_size: float,
vertices: Iterable[Tuple[float, float]],
rotation: float = None):
try:
from shapely.geometry import Polygon, Point
from shapely import affinity
except ImportError as exception:
print('{}; Shapely must be installed to use points_within_polygon'.format(exception))
raise
verts = [(vx/cell_size, vy/cell_size) for vx, vy in vertices]
poly = Polygon(verts)
if rotation is not None:
centroid = poly.centroid.coords[0]
zero_verts = translate_vertices(verts, (0., 0.), old_centroid=centroid)
rot_verts = [rotate_point(vx, vy, rotation) for vx, vy in zero_verts]
verts = translate_vertices(rot_verts, centroid, old_centroid=(0., 0.))
poly = Polygon(verts)
imin, jmin, imax, jmax = poly.bounds
imin, imax = int(imin), int(imax)
jmin, jmax = int(jmin), int(jmax)
valid_points = [(i, j)
for i in range(imin, imax)
for j in range(jmin, jmax)
if Point(i+.5, j+.5).within(poly)]
return valid_points
|
import asyncio
from aiohttp import web
import sys
import socketio
import pandas as pd
import ocakdeneme
import base64
sio = socketio.AsyncServer(async_mode='aiohttp',cors_allowed_origins="*")
app = web.Application()
sio.attach(app)
async def background_task():
"""Example of how to send server generated events to clients."""
count = 0
while True:
await sio.sleep(10)
count += 1
await sio.emit('my_response', {'data': 'Server generated event'})
async def index(request):
with open('app.html') as f:
return web.Response(text=f.read(), content_type='text/html')
@sio.event
async def my_event(asd, message):
print("mesaj",message)
await sio.emit('my_response', {'data': message['data']})
@sio.event
async def csv_is_comming(asd,in1):
print("csv is commig")
print(in1["base"])
asd = base64.b64decode(in1["base"])
    # print(pd.read_csv(in1))
ocakdeneme.calmaBaslat(asd,in1["time"])
await sio.emit('my_response', in1)
@sio.event
async def connect(sid, environ):
print("gönüllerin efendisi hoşgelmiş")
await sio.emit('my_response', {'data': 'Connected', 'count': 0}, room=sid)
@sio.event
def disconnect(sid):
print('Client disconnected')
app.router.add_get('/', index)
if __name__ == '__main__':
sio.start_background_task(background_task)
web.run_app(app)
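# --- Hedged usage sketch (not part of the original script) ---
# A minimal python-socketio client exercising the 'my_event' handler above, assuming the
# server runs on aiohttp's default port 8080:
#
#   import asyncio, socketio
#
#   async def main():
#       client = socketio.AsyncClient()
#       await client.connect('http://localhost:8080')
#       await client.emit('my_event', {'data': 'hello'})
#       await client.wait()
#
#   asyncio.run(main())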
|
'''
Generic Device class for HLTAPI-based devices.
'''
__all__ = (
'Device',
)
from enum import Enum
import contextlib
import functools
import itertools
import logging
import time
try:
from hltapi.exceptions import HltapiError
except Exception:
class HltapiError(Exception):
pass
from genie.decorator import managedattribute
from genie.conf.base.attributes import AttributesHelper
import genie.libs.conf.device.tgen
from genie.libs.conf.stream.stream import Stream, StreamStats
logger = logging.getLogger(__name__)
class Device(genie.libs.conf.device.tgen.Device):
'''Base Device class for HLTAPI-based TGEN devices'''
class Hltapi(object):
'''HLTAPI abstraction object.
        HLTAPI Device subclasses are encouraged to subclass Hltapi as well to customize HLTAPI calls to accommodate vendor-specific requirements.
'''
device = managedattribute(
name='device',
type=managedattribute.auto_ref, # TODO Device is not finished declaring yet
gettype=managedattribute.auto_unref,
doc='''The HLTAPI-based Genie Device object''')
@property
def pyats_connection(self):
'''The pyATS connection used for HLTAPI access'''
connectionmgr = self.device.connectionmgr
try:
return connectionmgr.connections['hltapi']
except KeyError:
# TODO This might not be a HltApiConnection!?
return connectionmgr.connections[connectionmgr.default_alias]
@property
def tcl(self):
'''The Tcl interpreter instance.'''
return self.pyats_connection._tcl
@property
def tcl_namespace(self):
'''The Tcl namespace where HLTAPI vendor code is loaded.'''
return self.pyats_connection._ns
def __getattr__(self, name):
            '''Redirect undefined attribute lookups to the pyATS connection.'''
if not name.startswith('_') and name != 'device':
return getattr(self.pyats_connection, name)
f = getattr(super(), '__getattr__', None)
if f is not None:
return f(name)
else:
raise AttributeError(name)
def __init__(self, device):
self.device = device
super().__init__()
hltapi = managedattribute(
name='hltapi',
read_only=True,
doc=Hltapi.__doc__)
@hltapi.initter
def hltapi(self):
'''Create an instance of Hltapi.
This can be a subclass of Hltapi if the Device subclass redefines it.
'''
return self.Hltapi(device=self)
@property
def all_port_handles(self):
pass # TODO hltspl_get_all_port_handles
@property
def tgen_port_handle_to_interface_map(self):
return {
interface.tgen_port_handle: interface \
for interface in self.tgen_port_interfaces}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_stream_stats(self, streams=None):
if streams is None:
streams = self.find_streams()
stats = StreamStats()
hltapi = self.hltapi
tcl = hltapi.tcl
#from genie.libs.conf.device.agilent import Device as AgilentDevice
from genie.libs.conf.device.ixia import Device as IxiaDevice
#from genie.libs.conf.device.pagent import Device as PagentDevice
#from genie.libs.conf.device.spirent import Device as SpirentDevice
map_stream_id_to_stream_obj = {}
for stream in streams:
stream_ids = stream.tgen_handle
if stream_ids:
for stream_id in stream_ids:
assert stream_id not in map_stream_id_to_stream_obj
map_stream_id_to_stream_obj[stream_id] = stream
else:
                logger.warning('%r: Nothing to do (no tgen_handle).', stream)
if map_stream_id_to_stream_obj:
tgen_port_handle_to_interface_map = \
self.tgen_port_handle_to_interface_map
hltkwargs = {}
hltkwargs['port_handle'] = \
list(tgen_port_handle_to_interface_map.keys())
hltkwargs['mode'] = 'streams'
hltkwargs['streams'] = list(map_stream_id_to_stream_obj.keys())
stream_key_rename_map = {
'tx.pkt_rate': 'tx.total_pkt_rate',
'rx.pkt_rate': 'rx.total_pkt_rate',
'rx.loss_pkts': 'rx._dropped_pkts',
}
if isinstance(self, IxiaDevice):
stream_key_rename_map.update({
'rx.loss_pkts': 'rx._delta_pkts',
})
hltkl = hltapi.traffic_stats(**hltkwargs)
if hltkl.get('waiting_for_stats', False):
# if { $try <= 6 } {
# continue ;# retry
# } else {
# enaLogVerify -warning "Statistics not ready... giving up"
# }
raise NotImplementedError
for port_handle, rx_interface \
in tgen_port_handle_to_interface_map.items():
for stream_id, hltkl_stream \
in hltkl.get('{}.stream'.format(port_handle), {})\
.items():
stream_id = str(stream_id) # already string?
try:
stream = map_stream_id_to_stream_obj[stream_id]
except KeyError:
continue
if stream not in stats.by_stream:
stats.by_stream[stream] = StreamStats.ByStreamStats()
stream_stats = stats.by_stream[stream]
for kfrom, kto in stream_key_rename_map.items():
try:
v = hltkl_stream.pop(kfrom)
except KeyError:
continue
hltkl_stream.setdefault(kto, v)
if rx_interface not in stream_stats.by_interface:
stream_stats.by_interface[rx_interface] = StreamStats.ByInterfaceStreamStats()
intf_stream_stats = stream_stats.by_interface[rx_interface]
for k1, v1 in hltkl_stream.items():
if k1 in (
'tx',
'rx',
):
txrx_intf_stream_stats = getattr(intf_stream_stats, k1)
txrx_stream_stats = getattr(stream_stats, k1)
for k2, v2 in v1.items():
setattr(txrx_intf_stream_stats, k2, v2)
if k2 in (
'total_pkts', 'total_pkt_rate',
'dropped_pkts', '_dropped_pkts',
'duplicate_pkts', '_duplicate_pkts',
):
setattr(txrx_stream_stats, k2, \
(getattr(txrx_stream_stats, k2, 0) or 0) \
+ v2)
elif k2 in (
'encap',
):
if getattr(txrx_stream_stats, k2, None) is None:
setattr(txrx_stream_stats, k2, set())
getattr(txrx_stream_stats, k2).add(
v2)
else:
pass # TODO
elif k1 in (
'ipv4_present', 'ipv6_present',
'tcp_present', 'udp_present',
):
setattr(intf_stream_stats, k1, v1)
if not getattr(stream_stats, k1, False):
setattr(stream_stats, k1, v1)
else:
setattr(intf_stream_stats, k1, v1)
# XXXJST HACK for TGENs that can't return elapsed_time, such as
# Pagent
for stream, stream_stats in stats.by_stream.items():
if stream_stats.elapsed_time is None:
try:
stream_stats.elapsed_time = \
float(stream_stats.tx.total_pkts) \
/ float(stream_stats.tx.total_pkt_rate)
except Exception as e:
logger.debug('Stream %s: No relevant TX information to'
' derive elapsed_time from', stream)
else:
logger.debug('Stream %s: Derived elapsed_time from'
' tx.total_pkts/tx.total_pkt_rate', stream)
return stats
def restart_traffic(self, *, ports=None, learn=True, start=True, clear_on_start=True, wait_rx=True, rx_timeout=10, tx_timeout=10):
if ports is None:
ports = set(stream.source_tgen_interface for stream in self.find_streams())
port_handles = set(port.tgen_port_handle for port in ports)
klStreamsStats1 = None
klStreamsStats2 = None
if port_handles:
def do_poll_traffic_running():
nonlocal wait_rx
timeout = rx_timeout if wait_rx else tx_timeout
is_traffic_running = False
td = 0
t0 = time.perf_counter()
last_iteration = False
for iteration in itertools.count(start=1):
if last_iteration:
is_traffic_running = False
logger.info('Timeout waiting for traffic (%r>%r)',
td, timeout)
break
if iteration > 1:
t1 = time.perf_counter()
td = int(t1 - t0)
time.sleep(1)
if td > timeout:
last_iteration = True
if klStreamsStats1 is not None:
klStreamsStats2 = self.get_stream_stats()
klStreamsStats = klStreamsStats2 - klStreamsStats1
else:
klStreamsStats = self.get_stream_stats()
is_traffic_running = True
for stream in klStreamsStats.by_stream.keys():
if wait_rx:
# TODO get rx stats specific to MulticastGroup receivers
kl_rx = klStreamsStats.by_stream[stream].rx
rx_total_pkts = kl_rx.total_pkts
if rx_total_pkts is None:
rx_total_pkts = 0
if not (rx_total_pkts > 0):
is_traffic_running = False
logger.info('%r RX packets is %r; Not running!', stream, rx_total_pkts)
else:
logger.info('%r RX packets is %r; Ok.', stream, rx_total_pkts)
else:
tx_total_pkts = klStreamsStats.by_stream[stream].tx.total_pkts
if tx_total_pkts is None:
tx_total_pkts = 0
if not (tx_total_pkts > 0):
is_traffic_running = False
logger.info('%r TX packets is %r; Not running!', stream, tx_total_pkts)
else:
logger.info('%r TX packets is %r; Ok.', stream, tx_total_pkts)
if is_traffic_running:
break
if not is_traffic_running:
wait_rx = False
return is_traffic_running
if learn:
# Start?, Wait RX/TX (Learn MAC) {{{
if self.is_traffic_running():
klStreamsStats1 = self.get_stream_stats()
else:
os = self.os
if os in ('ixia', 'spirent'):
# Ixia and Spirent do not automatically clear stats; Avoid this step to save time.
self.traffic_control(mode='start',
ports=ports,
wait=False,
clear_on_start=False)
else: # ('agilent', 'pagent', ...)
# Always clears on start
self.traffic_control(mode='start',
ports=ports,
wait=False)
do_poll_traffic_running()
klStreamsStats1 = None
# }}}
# Stop, Clear+Start, Wait RX/TX {{{
self.traffic_control(mode='stop',
ports=ports)
if start:
self.traffic_control(mode='start',
ports=ports,
wait=False,
clear_on_start=clear_on_start)
do_poll_traffic_running()
# }}}
else:
logger.debug('Nothing to do.')
def start_traffic(self, **kwargs):
return self.traffic_control(mode='start', **kwargs)
def stop_traffic(self, **kwargs):
return self.traffic_control(mode='stop', **kwargs)
def is_traffic_running(self, **kwargs):
hltapi = self.hltapi
tcl = hltapi.tcl
hltkl = self.traffic_control('poll', **kwargs)
return not tcl.cast_boolean(hltkl.stopped)
def traffic_control(self, mode, *, ports=None, wait=True, duration=None, clear_on_start=True, stop_counting=True, **kwargs):
hltkl = None # Returned value
from genie.libs.conf.device.agilent import Device as AgilentDevice
from genie.libs.conf.device.ixia import Device as IxiaDevice
from genie.libs.conf.device.pagent import Device as PagentDevice
from genie.libs.conf.device.spirent import Device as SpirentDevice
hltapi = self.hltapi
tcl = hltapi.tcl
if ports is None:
ports = []
        # for mcast_group in self.testbed.object_instances(cls=MulticastGroup):
# foreach sender [enaMcastGetMcastGroupParam $vMcastGroup -senders] {
# lassign $sender vTgenIntf stream_id
# if { [enaObjIsObject $vTgenIntf stream] } { continue } ;# done below
# lappend ports $vTgenIntf
# }
ports += [stream.source_tgen_interface
for stream in self.streams]
port_handles = list(set(
port if type(port) is str else port.tgen_port_handle
for port in ports))
if port_handles:
# Let hltapi object deal with no -port_handle vs "all"
if len(port_handles) == 1 and port_handles[0] == 'all':
port_handles = []
if mode == 'start':
hltkwargs = {}
if clear_on_start:
# Clear stats, if needed.
if isinstance(self, (IxiaDevice, SpirentDevice)):
self.traffic_control(mode='clear_stats', ports=['all'])
else:
pass # No need to clear stats
# Start traffic
if isinstance(self, IxiaDevice):
hltkwargs['action'] = 'sync_run'
if not clear_on_start:
# Makes a difference in pre-ixnetwork
hltkwargs['action'] = 'run'
elif isinstance(self, PagentDevice):
# NOTE Pagent: For streams with non-continuous transmit_mode, action should be "manual_trigger"
hltkwargs['action'] = 'run'
if not clear_on_start:
# Hack Pagent code to avoid "tgn clear count"
pass # TODO
# logger.debug('Disabling Pagent::traffic_control\'s "tgn clear count"')
# enaDestructor [xscale_save_proc ::Pagent::traffic_control]
# ::xscale::patch_proc_body ::Pagent::traffic_control \
# {(?n).*"tgn clear count".*} \
# ""
else:
hltkwargs['action'] = 'run'
if port_handles:
hltkwargs['port_handle'] = port_handles
if duration is not None:
# NOTE: Spirent: Make sure your Spirent has patch for
# SR #279953571 or at least version 3.70 or else
# duration will be blocking
hltkwargs['duration'] = duration
hltkl = hltapi.traffic_control(**hltkwargs)
# Wait
if wait is True:
if isinstance(self, SpirentDevice):
# action is asynchronous; poll or wait.
wait = 5
else:
# TBD
wait = 5
if wait:
logger.info('Waiting %s seconds for traffic start', wait)
time.sleep(wait)
elif mode == 'stop':
# Stop traffic
if isinstance(self, PagentDevice):
if not stop_counting:
# Hack Pagent code to avoid "pkts stop"
pass # TODO
# logger.debug('Disabling Pagent::traffic_control\'s "pkts stop"')
# enaDestructor [xscale_save_proc ::Pagent::traffic_control]
# ::xscale::patch_proc_body ::Pagent::traffic_control \
# {(?n).*"pkts stop".*} \
# ""
hltkwargs = {}
hltkwargs['action'] = 'stop'
if port_handles:
hltkwargs['port_handle'] = port_handles
if isinstance(self, SpirentDevice):
if int(tcl.eval('::package vcompare [::package require SpirentHltApi] 3.70')) >= 0:
# Tell STC HLTAPI 3.70+ to not save the EOT results database
hltkwargs['db_file'] = False
# Wait
if wait is True:
# Default trickle wait time for stop.
# It is assumed that the TGEN doesn't return until traffic
# is stopped. The extra wait time is mostly for packets to
# complete their path and stats to update.
wait = 2
if wait:
if isinstance(self, AgilentDevice):
                        # NOTE: Agilent already has its own trickle_time
if int(tcl.call('::AgtInvoke', 'AgtTestController', 'IsTrickleTimeEnabled')):
wait = False
elif isinstance(self, IxiaDevice):
# Without max_wait_timer, Ixia goes into asynchronous mode
hltkwargs['max_wait_timer'] = 10
else:
pass # No default trickle time
for tryno in range(1, 3):
try:
hltkl = hltapi.traffic_control(**hltkwargs)
except HltapiError:
if tryno == 1 and isinstance(self, AgilentDevice):
# NOTE Agilent: Could fail to stop on first try!
continue
raise
break
if wait:
logger.info('Waiting %s seconds for traffic stop', wait)
time.sleep(wait)
elif mode == 'clear_stats' or True: # default!
# TODO wrap -mode/action "poll"
# No further special processing; straight HLTAPI/HLTSPL
hltkwargs = {}
hltkwargs['action'] = mode
if port_handles:
hltkwargs['port_handle'] = port_handles
hltkl = hltapi.traffic_control(**hltkwargs)
else:
logger.debug('Nothing to do.')
return hltkl
def start_emulation(self, **kwargs):
pass # TODO no emulation supported yet! raise NotImplementedError
def stop_emulation(self, **kwargs):
pass # TODO no emulation supported yet! raise NotImplementedError
def build_config(self, apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
# Don't call super().build_config
# Nothing to do.
return '' # No CLI lines
def build_unconfig(self, clean=False, apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
# Don't call super().build_unconfig
if clean:
pass # TODO
# Nothing to do.
return '' # No CLI lines
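# --- Hedged usage sketch (not part of the original module) ---
# Typical traffic workflow on an HLTAPI-based TGEN device object, assuming streams have
# already been configured on it:
#
#   dev.start_traffic(ports=['all'])
#   stats = dev.get_stream_stats()
#   for stream, stream_stats in stats.by_stream.items():
#       print(stream, stream_stats.tx.total_pkts, stream_stats.rx.total_pkts)
#   dev.stop_traffic(ports=['all'])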
|
from __future__ import absolute_import, division, print_function
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity
and softmax loss that uses a modular layer design. We assume an
input dimension of D, a hidden dimension of H, and perform
classification over C classes.
    The architecture should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead,
it will interact with a separate Solver object that is
responsible for running optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100,
num_classes=10, weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to
classify
- weight_scale: Scalar giving the standard deviation for
random initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
##############################################################
# TODO: Initialize the weights and biases of the two-layer #
# net. Weights should be initialized from a Gaussian with #
# standard deviation equal to weight_scale, and biases #
# should be initialized to zero. All weights and biases #
# should be stored in the dictionary self.params, with first #
# layer weights and biases using the keys 'W1' and 'b1' and #
# second layer weights and biases using the keys 'W2' and #
# 'b2'. #
##############################################################
self.D = input_dim
self.H = hidden_dim
self.C = num_classes
W1 = weight_scale * np.random.randn(self.D, self.H)
W2 = weight_scale * np.random.randn(self.H, self.C)
b1 = np.zeros(self.H)
b2 = np.zeros(self.C)
self.params.update({'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2})
##############################################################
# END OF YOUR CODE #
##############################################################
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for
X[i].
Returns:
If y is None, then run a test-time forward pass of the model
and return:
- scores: Array of shape (N, C) giving classification scores,
where scores[i, c] is the classification score for X[i] and
class c.
If y is not None, then run a training-time forward and
backward pass and return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping
parameter names to gradients of the loss with respect to
those parameters.
"""
scores = None
##############################################################
# TODO: Implement the forward pass for the two-layer net, #
# computing the class scores for X and storing them in the #
# scores variable. #
##############################################################
# X1 := output of affine+relu on X
        # X2 := output of affine on X1
# Get the params
W1 = self.params['W1']
b1 = self.params['b1']
W2 = self.params['W2']
b2 = self.params['b2']
X1, cache1 = affine_relu_forward(X, W1, b1)
X2, cache2 = affine_forward(X1, W2, b2)
scores = X2
##############################################################
# END OF YOUR CODE #
##############################################################
# If y is None then we are in test mode so just return scores
if y is None:
return scores
loss, grads = 0, {}
##############################################################
# TODO: Implement the backward pass for the two-layer net. #
# Store the loss in the loss variable and gradients in the #
# grads dictionary. Compute data loss using softmax, and #
# make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and #
# you pass the automated tests, make sure that your L2 #
# regularization includes a factor of 0.5 to simplify the #
# expression for the gradient. #
##############################################################
loss, dX2 = softmax_loss(X2, y)
dX1, dW2, db2 = affine_backward(dX2, cache2)
dX, dW1, db1 = affine_relu_backward(dX1, cache1)
# Regularize
loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
dW1 += self.reg * W1
dW2 += self.reg * W2
grads.update({'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2})
##############################################################
# END OF YOUR CODE #
##############################################################
return loss, grads
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of
    hidden layers, ReLU nonlinearities, and a softmax loss function.
This will also implement dropout and batch normalization as
options. For a network with L layers, the architecture will be
{affine - [batch norm] - relu - [dropout]} \
x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...}
block is repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored
in the self.params dictionary and will be learned using the
Solver class.
"""
def __init__(self, hidden_dims, input_dim=3 * 32 * 32,
num_classes=10, loss_func='softmax',
dropout=0, use_batchnorm=False, reg=0.0,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each
hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to
classify.
- loss_func: [djn] Loss function ('softmax' or 'svm').
- dropout: Scalar between 0 and 1 giving dropout strength. If
dropout=0 then the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch
normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for
random initialization of the weights.
- dtype: A numpy datatype object; all computations will be
performed using this datatype. float32 is faster but less
accurate, so you should use float64 for numeric gradient
checking.
- seed: If not None, then pass this random seed to the dropout
      layers. This will make the dropout layers deterministic so
we can gradient check the model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
##### [djn] #####
loss_funcs = {
'softmax': softmax_loss,
'svm': svm_loss,
}
if loss_func not in loss_funcs:
raise ValueError('Unknown loss function: {}'.format(
loss_func))
self.loss_func = loss_funcs[loss_func]
##############################################################
# TODO: Initialize the parameters of the network, storing #
# all values in the self.params dictionary. Store weights #
# and biases for the first layer in W1 and b1; for the #
# second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution with standard #
# deviation equal to weight_scale and biases should be #
# initialized to zero. #
# #
# When using batch normalization, store scale and shift #
# parameters for the first layer in gamma1 and beta1; for #
# the second layer use gamma2 and beta2, etc. Scale #
# parameters should be initialized to one and shift #
# parameters should be initialized to zero. #
##############################################################
dims = [input_dim]
dims.extend(hidden_dims)
dims.append(num_classes)
        for i in range(0, self.num_layers):
W = weight_scale * np.random.randn(dims[i], dims[i + 1])
b = np.zeros(dims[i + 1])
self.params['W{}'.format(i + 1)] = W
self.params['b{}'.format(i + 1)] = b
if self.use_batchnorm and i < self.num_layers - 1:
gamma = np.ones(dims[i + 1])
beta = np.zeros(dims[i + 1])
self.params['gamma{}'.format(i + 1)] = gamma
self.params['beta{}'.format(i + 1)] = beta
self.dims = dims
##############################################################
# END OF YOUR CODE #
##############################################################
# When using dropout we need to pass a dropout_param
# dictionary to each dropout layer so that the layer knows the
# dropout probability and the mode
# (train / test). You can pass the same dropout_param to each
# dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running
# means and variances, so we need to pass a special bn_param
# object to each batch normalization layer. You should pass
# self.bn_params[0] to the forward pass of the first batch
# normalization layer, self.bn_params[1] to the forward pass
# of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
self.bn_params = [{'mode': 'train'}
                              for i in range(self.num_layers - 1)]
# Cast all parameters to the correct datatype
        for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param
# since they behave differently during training and testing.
##if self.dropout_param is not None:
if self.use_dropout:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
bn_param['mode'] = mode
scores = None
##############################################################
# TODO: Implement the forward pass for the fully-connected #
# net, computing the class scores for X and storing them in #
# the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param #
# to each dropout forward pass. #
# #
# When using batch normalization, you'll need to pass #
# self.bn_params[0] to the forward pass for the first batch #
# normalization layer, pass self.bn_params[1] to the forward #
# pass for the second batch normalization layer, etc. #
##############################################################
caches = []
inp = X # input
out = None # output
# Iterate over each hidden layers
        for i in range(self.num_layers - 1):
# affine layer
W = self.params['W{}'.format(i + 1)]
b = self.params['b{}'.format(i + 1)]
out, cache = affine_forward(inp, W, b)
caches.append(cache)
inp = out # for the next step
# batchnorm layer
if self.use_batchnorm:
bn_param = self.bn_params[i]
gamma = self.params['gamma{}'.format(i + 1)]
beta = self.params['beta{}'.format(i + 1)]
out, cache = batchnorm_forward(inp, gamma, beta,
bn_param)
caches.append(cache)
inp = out
# relu layer
out, cache = relu_forward(inp)
caches.append(cache)
inp = out
# dropout layer
if self.use_dropout:
out, cache = dropout_forward(inp, self.dropout_param)
caches.append(cache)
inp = out
# final affine layer
W = self.params['W{}'.format(self.num_layers)]
b = self.params['b{}'.format(self.num_layers)]
out, cache = affine_forward(inp, W, b)
caches.append(cache)
scores = out
##############################################################
# END OF YOUR CODE #
##############################################################
# If test mode return early
if mode == 'test':
return scores
loss, grads = 0.0, {}
##############################################################
# TODO: Implement the backward pass for the fully-connected #
# net. Store the loss in the loss variable and gradients in #
# the grads dictionary. Compute data loss using softmax, and #
# make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch normalization, you don't need to #
# regularize the scale and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and #
# you pass the automated tests, make sure that your L2 #
# regularization includes a factor of 0.5 to simplify the #
# expression for the gradient. #
##############################################################
loss, dx = self.loss_func(scores, y)
# Last affine layer
cache = caches.pop()
dx, dw, db = affine_backward(dx, cache)
grads['W{}'.format(self.num_layers)] = dw
grads['b{}'.format(self.num_layers)] = db
# Iterate over hidden layers backwards
        for i in range(self.num_layers - 1, 0, -1):
# dropout layer
if self.use_dropout:
cache = caches.pop()
dx = dropout_backward(dx, cache)
# relu layer
cache = caches.pop()
dx = relu_backward(dx, cache)
# batchnorm layer
if self.use_batchnorm:
cache = caches.pop()
dx, dgamma, dbeta = batchnorm_backward_alt(dx, cache)
grads['gamma{}'.format(i)] = dgamma
grads['beta{}'.format(i)] = dbeta
# affine layer
cache = caches.pop()
dx, dw, db = affine_backward(dx, cache)
grads['W{}'.format(i)] = dw
grads['b{}'.format(i)] = db
##############################################################
# END OF YOUR CODE #
##############################################################
return loss, grads
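# --- Hedged smoke test (not part of the original assignment file; assumes the cs231n layer
# implementations imported above are complete) ---
if __name__ == '__main__':
    np.random.seed(0)
    model = TwoLayerNet(input_dim=10, hidden_dim=5, num_classes=3, weight_scale=1e-2, reg=0.1)
    X = np.random.randn(4, 10)
    y = np.random.randint(3, size=4)
    loss, grads = model.loss(X, y)
    print('TwoLayerNet loss:', loss)
    assert set(grads.keys()) == {'W1', 'b1', 'W2', 'b2'}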
|
from django.urls import path, re_path, include
from app1 import views
urlpatterns = [
path('video', views.VideoView.as_view())
]
|
import time
import datetime
def stringify(dt):
return time.strftime("%Y-%m-%d %H:%M:%S",dt.timetuple())
def parse(s):
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
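# Example round trip (sketch, not part of the original module): the two helpers are
# inverses at second precision:
#   dt = datetime.datetime(2020, 1, 2, 3, 4, 5)
#   parse(stringify(dt)) == dt   # -> True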
|
from flask import render_template, request, redirect, url_for, abort
from . import main
from ..models import User,Pizza,Roles,Topping,Crust
from flask_login import login_required,current_user
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template('profile/profile.html',user = user)
@main.route('/')
def pizza():
'''
pizza order page
'''
title = "Home"
return render_template('index.html', title = title)
|
from env import Maze
from agent import QLearningTable
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
episode_count = 1000 # Number of episodes to run the experiment
episodes = range(episode_count)
movements = [] # Number of movements happened in each episode
rewards = [] # The gained reward in each episode
'''
This function updates the position of the explorer in the Maze environment based on the actions it chooses.
'''
def run_experiment():
for episode in episodes:
print("Episode %s/%s." %(episode+1, episode_count))
# initial observation;
observation = env.reset()
moves = 0
while True:
# fresh env
env.render()
# Q-learning chooses action based on observation
# we convert observation to str since we want to use them as index for our DataFrame.
action = q_learning_agent.choose_action(str(observation)) # ToDo: call choose_action() method from the agent QLearningTable instance
# RL takes action and gets next observation and reward
observation_, reward, done = env.get_state_reward(action) # ToDo: call get_state_reward() method from Maze environment instance
moves +=1
# RL learn from the above transition,
# Update the Q value for the given tuple
q_learning_agent.learn(str(observation), action, reward, str(observation_))# ToDo: call learn method from Q-learning agent instance, passing (s, a, r, s') tuple
# consider the next observation
observation = observation_
# break while loop when end of this episode
if done:
movements.append(moves) # Keep track of the number of movements in this episode
rewards.append(reward) # Keep track of the gained reward in this episode
print("Reward: {0}, Moves: {1}".format(reward, moves))
break
# end of game
print('game over!')
# Show the results
plot_reward_movements()
def plot_reward_movements():
plt.figure()
plt.subplot(2,1,1)
plt.plot(episodes, movements)
plt.xlabel("Episode")
plt.ylabel("# Movements")
plt.subplot(2,1,2)
plt.step(episodes, rewards)
plt.xlabel("Episode")
plt.ylabel("Reward")
plt.savefig("rewards_movements_q_learn.png")
plt.show()
if __name__ == "__main__":
    # Create maze environment
    env = Maze()  # ToDo: instantiate Maze class
    # Create Q-learning agent
    q_learning_agent = QLearningTable(actions=list(range(env.n_actions)))  # ToDo: instantiate QLearningTable class
# Call run_experiment() function once after given time in milliseconds.
env.window.after(10, run_experiment)
# The infinite loop used to run the application, wait for an event to occur and process the event
# till the window is not closed.
env.window.mainloop()
|
import os
from pathlib import Path
from kombu import Queue
from celery.app.base import Celery
from dotenv import load_dotenv
PACKAGE_PARENT = '.'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
env_path = Path('./') / '.env'
load_dotenv(dotenv_path=env_path)
queue_name = "queue_to_feed"
config = {
"broker_url": os.getenv("BROKER_URL"),
"result_backend": None,
"broker_pool_limit": 1,
"broker_heartbeat": None,
"broker_connection_timeout": 40,
"task_ignore_result": True,
"event_queue_expires": 60,
"worker_prefetch_multiplier": 1,
"worker_concurrency": 32,
"worker_hijack_root_logger": False,
"worker_max_tasks_per_child": 10,
"reject_on_worker_lost": True,
"buffer_while_offline": False
}
celery = Celery("tasks", broker=config["broker_url"])
celery.conf.update(**config)
celery.task_acks_late = True
celery.conf.broker_transport_options = {}
celery.conf.task_queues = (Queue(queue_name, routing_key='tasks.task_name',
**{'x-queue-mode': 'lazy', 'queue_arguments': {'x-queue-mode': 'lazy'}}),
)
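# --- Hedged usage sketch (not part of the original module; the task and module names are
# illustrative and only chosen to match the routing key configured above) ---
#
#   @celery.task(name='tasks.task_name')
#   def process(payload):
#       # consume one message pushed to "queue_to_feed"
#       ...
#
# Run a worker bound to the lazy queue with:
#   celery -A <module_name> worker -Q queue_to_feed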
|
""" MIDI-like encoding method similar to ???
Music Transformer:
"""
from typing import List, Tuple, Dict, Optional
import numpy as np
from miditoolkit import Instrument, Note, TempoChange
from .midi_tokenizer_base import MIDITokenizer, Event, detect_chords
from .constants import *
class MIDILikeEncoding(MIDITokenizer):
""" Structured MIDI encoding method as using in the Piano Inpainting Application
https://arxiv.org/abs/2107.05944
The token types follows the specific pattern:
Pitch -> Velocity -> Duration -> Time shift -> back to Pitch ...
NOTE: this encoding uses only "Time Shifts" events to move in the time, and only
from one note to another. Hence it is suitable to encode continuous sequences of
notes without long periods of silence. If your dataset contains music with long
pauses, you might handle them with an appropriate "time shift" dictionary
(which values are made from the beat_res dict) or with a different encoding.
NOTE: the original Structured MIDI Encoding doesn't use Chords tokens as its
purpose is to draw uniform token types transitions, you can still use them but
it will "break" this property
:param pitch_range: range of used MIDI pitches
:param beat_res: beat resolutions, with the form:
{(beat_x1, beat_x2): beat_res_1, (beat_x2, beat_x3): beat_res_2, ...}
The keys of the dict are tuples indicating a range of beats, ex 0 to 3 for the first bar
The values are the resolution, in frames per beat, of the given range, ex 8
:param nb_velocities: number of velocity bins
:param additional_tokens: specifies additional tokens (chords, empty bars, tempo...)
:param program_tokens: will add entries for MIDI programs in the dictionary, to use
in the case of multitrack generation for instance
:param params: can be a path to the parameter (json encoded) file or a dictionary
"""
def __init__(self, pitch_range: range = PITCH_RANGE, beat_res: Dict[Tuple[int, int], int] = BEAT_RES,
nb_velocities: int = NB_VELOCITIES, additional_tokens: Dict[str, bool] = ADDITIONAL_TOKENS,
program_tokens: bool = PROGRAM_TOKENS, params=None):
additional_tokens['Empty'] = False # Incompatible additional tokens
super().__init__(pitch_range, beat_res, nb_velocities, additional_tokens, program_tokens, params)
def track_to_tokens(self, track: Instrument) -> List[int]:
""" Converts a track (miditoolkit.Instrument object) into a sequence of tokens
(can probably be achieved faster with Mido objects)
:param track: MIDI track to convert
:return: sequence of corresponding tokens
"""
# Make sure the notes are sorted first by their onset (start) times, second by pitch
# notes.sort(key=lambda x: (x.start, x.pitch)) # done in midi_to_tokens
events = []
# Creates the Note On, Note Off and Velocity events
for n, note in enumerate(track.notes):
            if note.pitch not in self.pitch_range:  # Notes too low or too high are discarded
continue
# Note On
events.append(Event(
name='Note-On',
time=note.start,
value=note.pitch,
text=note.pitch))
# Velocity
velocity_index = (np.abs(self.velocity_bins - note.velocity)).argmin()
events.append(Event(
name='Velocity',
time=note.start,
value=velocity_index,
text=f'{note.velocity}/{self.velocity_bins[velocity_index]}'))
# Note Off
events.append(Event(
name='Note-Off',
time=note.end,
value=note.pitch,
text=note.pitch))
# Adds tempo events if specified
if self.additional_tokens['Tempo']:
for tempo_change in self.current_midi_metadata['tempo_changes']:
events.append(Event(
name='Tempo',
time=tempo_change.time,
value=(np.abs(self.tempo_bins - tempo_change.tempo)).argmin(),
text=tempo_change.tempo))
        # Sort events chronologically
events.sort(key=lambda x: x.time)
# Time Shift
current_tick = 0
for e, event in enumerate(events[:-1].copy()):
if event.time == current_tick:
continue
time_shift = event.time - current_tick
index = np.argmin(np.abs([ticks - time_shift for ticks in
self.durations_ticks[self.current_midi_metadata['time_division']]]))
events.append(Event(
name='Time-Shift',
time=current_tick,
value='.'.join(map(str, self.durations[index])),
text=f'{time_shift} ticks'))
current_tick = event.time
# Adds chord events if specified
if self.additional_tokens['Chord'] and not track.is_drum:
events += detect_chords(track.notes, self.current_midi_metadata['time_division'])
events.sort(key=lambda x: (x.time, self._order(x)))
return self.events_to_tokens(events)
def tokens_to_track(self, tokens: List[int], time_division: Optional[int] = TIME_DIVISION,
program: Optional[Tuple[int, bool]] = (0, False), default_duration: int = None) \
-> Tuple[Instrument, List[TempoChange]]:
""" Converts a sequence of tokens into a track object
:param tokens: sequence of tokens to convert
:param time_division: MIDI time division / resolution, in ticks/beat (of the MIDI to create)
        :param program: the MIDI program of the produced track and whether it is a drum track (default (0, False), piano)
:param default_duration: default duration (in ticks) in case a Note On event occurs without its associated
note off event. Leave None to discard Note On with no Note Off event.
:return: the miditoolkit instrument object and tempo changes
"""
events = self.tokens_to_events(tokens)
max_duration = (self.durations[-1][0] + self.durations[-1][1]) * time_division
name = 'Drums' if program[1] else MIDI_INSTRUMENTS[program[0]]['name']
instrument = Instrument(program[0], is_drum=program[1], name=name)
if self.additional_tokens['Tempo']:
tempo_changes = [TempoChange(TEMPO, -1)] # mock the first tempo change to optimize below
else: # default
tempo_changes = [TempoChange(TEMPO, 0)] * 2 # the first will be deleted at the end of the method
current_tick = 0
count = 0
while count < len(events):
if events[count].name == 'Note-On':
try:
if events[count + 1].name == 'Velocity':
pitch = int(events[count].value)
vel = int(self.velocity_bins[int(events[count + 1].value)])
# look for an associated note off event to get duration
offset_tick = 0
duration = 0
for i in range(count+1, len(events)):
if events[i].name == 'Note-Off' and int(events[i].value) == pitch:
duration = offset_tick
break
elif events[i].name == 'Time-Shift':
beat, pos, res = map(int, events[i].value.split('.'))
offset_tick += (beat * res + pos) * time_division // res
if offset_tick > max_duration: # will not look for Note Off beyond
break
if duration == 0 and default_duration is not None:
duration = default_duration
if duration != 0:
instrument.notes.append(Note(vel, pitch, current_tick, current_tick + duration))
count += 2
except IndexError as _:
count += 1
elif events[count].name == 'Time-Shift':
beat, pos, res = map(int, events[count].value.split('.'))
current_tick += (beat * res + pos) * time_division // res
count += 1
elif events[count].name == 'Tempo':
tempo = int(self.tempo_bins[int(events[count].value)])
if tempo != tempo_changes[-1].tempo:
tempo_changes.append(TempoChange(tempo, current_tick))
count += 1
else:
count += 1
del tempo_changes[0]
return instrument, tempo_changes
def _create_vocabulary(self, program_tokens: bool) -> Tuple[dict, dict, dict]:
""" Create the tokens <-> event dictionaries
        These dictionaries are created arbitrarily according to constants defined
        at the top of this file.
        Note that when using them (prepare_data method), there is no error handling,
        so you must be sure that every case is covered by the dictionaries.
        NOTE: token index 0 is often used as a padding index during training, so it might
        be preferable to leave it as is.
:param program_tokens: creates tokens for MIDI programs in the dictionary
:return: the dictionaries, one for each translation
"""
event_to_token = {'PAD_None': 0} # starting at 1, token 0 is for padding
        token_type_indices = {'Pad': [0]}
count = 1
# NOTE ON
token_type_indices['Note-On'] = list(range(count, count + len(self.pitch_range)))
for i in self.pitch_range:
event_to_token[f'Note-On_{i}'] = count
count += 1
# NOTE OFF
token_type_indices['Note-Off'] = list(range(count, count + len(self.pitch_range)))
for i in self.pitch_range:
event_to_token[f'Note-Off_{i}'] = count
count += 1
# VELOCITY
token_type_indices['Velocity'] = list(range(count, count + len(self.velocity_bins)))
for i in range(len(self.velocity_bins)):
event_to_token[f'Velocity_{i}'] = count
count += 1
# TIME SHIFTS
token_type_indices['Time-Shift'] = list(range(count, count + len(self.durations)))
for i in range(0, len(self.durations)):
event_to_token[f'Time-Shift_{".".join(map(str, self.durations[i]))}'] = count
count += 1
# CHORD
if self.additional_tokens['Chord']:
token_type_indices['Chord'] = list(range(count, count + 3 + len(CHORD_MAPS)))
for i in range(3, 6): # non recognized chords, just considers the nb of notes (between 3 and 5 only)
event_to_token[f'Chord_{i}'] = count
count += 1
for chord_quality in CHORD_MAPS: # classed chords
event_to_token[f'Chord_{chord_quality}'] = count
count += 1
# TEMPO
if self.additional_tokens['Tempo']:
token_type_indices['Tempo'] = list(range(count, count + len(self.tempo_bins)))
for i in range(len(self.tempo_bins)):
event_to_token[f'Tempo_{i}'] = count
count += 1
# PROGRAM
if program_tokens:
token_type_indices['Program'] = list(range(count, count + 129))
for program in range(-1, 128): # -1 is drums
event_to_token[f'Program_{program}'] = count
count += 1
token_to_event = {v: k for k, v in event_to_token.items()} # inversion
return event_to_token, token_to_event, token_type_indices
def _create_token_types_graph(self) -> Dict[str, List[str]]:
dic = dict()
if 'Program' in self.token_types_indices:
dic['Program'] = ['Note-On', 'Time-Shift']
dic['Note-On'] = ['Velocity']
dic['Velocity'] = ['Note-On', 'Time-Shift']
dic['Time-Shift'] = ['Note-Off', 'Note-On']
dic['Note-Off'] = ['Note-Off', 'Note-On', 'Time-Shift']
if self.additional_tokens['Chord']:
dic['Chord'] = ['Note-On']
dic['Time-Shift'] += ['Chord']
dic['Note-Off'] += ['Chord']
if self.additional_tokens['Tempo']:
dic['Time-Shift'] += ['Tempo']
dic['Tempo'] = ['Time-Shift', 'Note-On']
if self.additional_tokens['Chord']:
dic['Tempo'] += ['Chord']
return dic
@staticmethod
def _order(x: Event) -> int:
""" Helper function to sort events in the right order
:param x: event to get order index
:return: an order int
"""
if x.name == "Program":
return 0
elif x.name == "Note-Off":
return 1
elif x.name == 'Tempo':
return 2
elif x.name == "Chord":
return 3
elif x.name == "Time-Shift":
return 1000 # always last
        else:  # for other types of events, the order should be handled when inserting the events in the sequence
return 4
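
# --- Illustrative usage sketch (not part of the original module) ---
# How this encoding could be used, assuming the MIDITokenizer base class exposes
# midi_to_tokens / tokens_to_midi (as in miditok) and that 'example.mid' exists;
# both the method names and the file name are assumptions.
#
# from miditoolkit import MidiFile
# tokenizer = MIDILikeEncoding()               # default pitch range, beat resolution, velocities
# midi = MidiFile('example.mid')
# tokens = tokenizer.midi_to_tokens(midi)      # one token sequence per track
# restored = tokenizer.tokens_to_midi(tokens)  # round-trip back to a MidiFile object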
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_instance
short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
description:
- Deploy, start, update, scale, restart, restore, stop and destroy instances.
version_added: '2.0'
author: René Moser (@resmo)
options:
name:
description:
- Host name of the instance. C(name) can only contain ASCII letters.
- Name will be generated (UUID) by CloudStack if not specified and can not be changed afterwards.
- Either C(name) or C(display_name) is required.
type: str
display_name:
description:
- Custom display name of the instances.
- Display name will be set to I(name) if not specified.
- Either I(name) or I(display_name) is required.
type: str
group:
description:
      - Group the new instance should be in.
type: str
state:
description:
- State of the instance.
type: str
default: present
choices: [ deployed, started, stopped, restarted, restored, destroyed, expunged, present, absent ]
service_offering:
description:
- Name or id of the service offering of the new instance.
- If not set, first found service offering is used.
type: str
cpu:
description:
- The number of CPUs to allocate to the instance, used with custom service offerings
type: int
cpu_speed:
description:
- The clock speed/shares allocated to the instance, used with custom service offerings
type: int
memory:
description:
- The memory allocated to the instance, used with custom service offerings
type: int
template:
description:
- Name, display text or id of the template to be used for creating the new instance.
- Required when using I(state=present).
- Mutually exclusive with I(iso) option.
type: str
iso:
description:
- Name or id of the ISO to be used for creating the new instance.
- Required when using I(state=present).
- Mutually exclusive with I(template) option.
type: str
template_filter:
description:
- Name of the filter used to search for the template or iso.
- Used for params I(iso) or I(template) on I(state=present).
- The filter C(all) was added in 2.6.
type: str
default: executable
choices: [ all, featured, self, selfexecutable, sharedexecutable, executable, community ]
aliases: [ iso_filter ]
version_added: '2.1'
hypervisor:
description:
- Name the hypervisor to be used for creating the new instance.
- Relevant when using I(state=present), but only considered if not set on ISO/template.
- If not set or found on ISO/template, first found hypervisor will be used.
- Possible values are C(KVM), C(VMware), C(BareMetal), C(XenServer), C(LXC), C(HyperV), C(UCS), C(OVM), C(Simulator).
type: str
keyboard:
description:
- Keyboard device type for the instance.
type: str
choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
networks:
description:
- List of networks to use for the new instance.
type: list
aliases: [ network ]
ip_address:
description:
- IPv4 address for default instance's network during creation.
type: str
ip6_address:
description:
- IPv6 address for default instance's network.
type: str
ip_to_networks:
description:
- "List of mappings in the form I({'network': NetworkName, 'ip': 1.2.3.4})"
- Mutually exclusive with I(networks) option.
type: list
aliases: [ ip_to_network ]
disk_offering:
description:
- Name of the disk offering to be used.
type: str
disk_size:
description:
- Disk size in GByte required if deploying instance from ISO.
type: int
root_disk_size:
description:
      - Root disk size in GByte, required if deploying the instance with the KVM hypervisor and you want to resize the root disk at startup
        (requires CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template).
type: int
security_groups:
description:
      - List of security groups to be applied to the instance.
type: list
aliases: [ security_group ]
host:
description:
- Host on which an instance should be deployed or started on.
- Only considered when I(state=started) or instance is running.
- Requires root admin privileges.
type: str
version_added: '2.6'
domain:
description:
- Domain the instance is related to.
type: str
account:
description:
- Account the instance is related to.
type: str
project:
description:
      - Name of the project the instance is to be deployed in.
type: str
zone:
description:
- Name of the zone in which the instance should be deployed.
- If not set, default zone is used.
type: str
ssh_key:
description:
- Name of the SSH key to be deployed on the new instance.
type: str
affinity_groups:
description:
- Affinity groups names to be applied to the new instance.
type: list
aliases: [ affinity_group ]
user_data:
description:
- Optional data (ASCII) that can be sent to the instance upon a successful deployment.
- The data will be automatically base64 encoded.
- Consider switching to HTTP_POST by using I(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB.
type: str
force:
description:
- Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
type: bool
default: no
allow_root_disk_shrink:
description:
- Enables a volume shrinkage when the new size is smaller than the old one.
type: bool
default: no
version_added: '2.7'
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "If you want to delete all tags, set a empty list e.g. I(tags: [])."
type: list
aliases: [ tag ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
details:
description:
- Map to specify custom parameters.
type: dict
version_added: '2.6'
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# NOTE: Names of offerings and ISOs depend on the CloudStack configuration.
- name: create an instance from an ISO
cs_instance:
name: web-vm-1
iso: Linux Debian 7 64-bit
hypervisor: VMware
project: Integration
zone: ch-zrh-ix-01
service_offering: 1cpu_1gb
disk_offering: PerfPlus Storage
disk_size: 20
networks:
- Server Integration
- Sync Integration
- Storage Integration
delegate_to: localhost
- name: for changing a running instance, use the 'force' parameter
cs_instance:
name: web-vm-1
display_name: web-vm-01.example.com
iso: Linux Debian 7 64-bit
service_offering: 2cpu_2gb
force: yes
delegate_to: localhost
# NOTE: user_data can be used to kickstart the instance using cloud-init yaml config.
- name: create or update an instance on Exoscale's public cloud using display_name.
cs_instance:
display_name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ssh_key: john@example.com
tags:
- key: admin
value: john
- key: foo
value: bar
user_data: |
#cloud-config
packages:
- nginx
delegate_to: localhost
- name: create an instance with multiple interfaces specifying the IP addresses
cs_instance:
name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ip_to_networks:
- network: NetworkA
ip: 10.1.1.1
- network: NetworkB
ip: 192.0.2.1
delegate_to: localhost
- name: ensure an instance is stopped
cs_instance:
name: web-vm-1
state: stopped
delegate_to: localhost
- name: ensure an instance is running
cs_instance:
name: web-vm-1
state: started
delegate_to: localhost
- name: remove an instance
cs_instance:
name: web-vm-1
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the instance.
returned: success
type: str
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the instance.
returned: success
type: str
sample: web-01
display_name:
description: Display name of the instance.
returned: success
type: str
sample: web-01
group:
  description: Group name the instance is related to.
returned: success
type: str
sample: web
created:
  description: Date the instance was created.
returned: success
type: str
sample: 2014-12-01T14:57:57+0100
password_enabled:
description: True if password setting is enabled.
returned: success
type: bool
sample: true
password:
  description: The password of the instance, if it exists.
returned: if available
type: str
sample: Ge2oe7Do
ssh_key:
description: Name of SSH key deployed to instance.
returned: if available
type: str
sample: key@work
domain:
description: Domain the instance is related to.
returned: success
type: str
sample: example domain
account:
description: Account the instance is related to.
returned: success
type: str
sample: example account
project:
description: Name of project the instance is related to.
returned: success
type: str
sample: Production
default_ip:
description: Default IP address of the instance.
returned: success
type: str
sample: 10.23.37.42
default_ip6:
description: Default IPv6 address of the instance.
returned: if available
type: str
sample: 2a04:c43:c00:a07:4b4:beff:fe00:74
version_added: '2.6'
public_ip:
  description: Public IP address associated with the instance via static NAT rule.
returned: if available
type: str
sample: 1.2.3.4
iso:
description: Name of ISO the instance was deployed with.
returned: if available
type: str
sample: Debian-8-64bit
template:
description: Name of template the instance was deployed with.
returned: success
type: str
sample: Linux Debian 9 64-bit
template_display_text:
description: Display text of template the instance was deployed with.
returned: success
type: str
sample: Linux Debian 9 64-bit 200G Disk (2017-10-08-622866)
version_added: '2.6'
service_offering:
description: Name of the service offering the instance has.
returned: success
type: str
sample: 2cpu_2gb
zone:
description: Name of zone the instance is in.
returned: success
type: str
sample: ch-gva-2
state:
description: State of the instance.
returned: success
type: str
sample: Running
security_groups:
description: Security groups the instance is in.
returned: success
type: list
sample: '[ "default" ]'
affinity_groups:
description: Affinity groups the instance is in.
returned: success
type: list
sample: '[ "webservers" ]'
tags:
description: List of resource tags associated with the instance.
returned: success
type: list
sample: '[ { "key": "foo", "value": "bar" } ]'
hypervisor:
description: Hypervisor related to this instance.
returned: success
type: str
sample: KVM
host:
description: Hostname of hypervisor an instance is running on.
returned: success and instance is running
type: str
sample: host-01.example.com
version_added: '2.6'
instance_name:
description: Internal name of the instance (ROOT admin only).
returned: success
type: str
sample: i-44-3992-VM
user-data:
description: Optional data sent to the instance.
returned: success
type: str
sample: VXNlciBkYXRhIGV4YW1wbGUK
'''
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackInstance(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackInstance, self).__init__(module)
self.returns = {
'group': 'group',
'hypervisor': 'hypervisor',
'instancename': 'instance_name',
'publicip': 'public_ip',
'passwordenabled': 'password_enabled',
'password': 'password',
'serviceofferingname': 'service_offering',
'isoname': 'iso',
'templatename': 'template',
'templatedisplaytext': 'template_display_text',
'keypair': 'ssh_key',
'hostname': 'host',
}
self.instance = None
self.template = None
self.iso = None
def get_service_offering_id(self):
service_offering = self.module.params.get('service_offering')
service_offerings = self.query_api('listServiceOfferings')
if service_offerings:
if not service_offering:
return service_offerings['serviceoffering'][0]['id']
for s in service_offerings['serviceoffering']:
if service_offering in [s['name'], s['id']]:
return s['id']
self.fail_json(msg="Service offering '%s' not found" % service_offering)
def get_host_id(self):
host_name = self.module.params.get('host')
if not host_name:
return None
args = {
'type': 'routing',
'zoneid': self.get_zone(key='id'),
}
hosts = self.query_api('listHosts', **args)
if hosts:
for h in hosts['host']:
if h['name'] == host_name:
return h['id']
self.fail_json(msg="Host '%s' not found" % host_name)
def get_template_or_iso(self, key=None):
template = self.module.params.get('template')
iso = self.module.params.get('iso')
if not template and not iso:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'isrecursive': True,
'fetch_list': True,
}
if template:
if self.template:
return self._get_by_key(key, self.template)
rootdisksize = self.module.params.get('root_disk_size')
args['templatefilter'] = self.module.params.get('template_filter')
args['fetch_list'] = True
templates = self.query_api('listTemplates', **args)
if templates:
for t in templates:
if template in [t['displaytext'], t['name'], t['id']]:
if rootdisksize and t['size'] > rootdisksize * 1024 ** 3:
continue
self.template = t
return self._get_by_key(key, self.template)
if rootdisksize:
more_info = " (with size <= %s)" % rootdisksize
else:
more_info = ""
self.module.fail_json(msg="Template '%s' not found%s" % (template, more_info))
elif iso:
if self.iso:
return self._get_by_key(key, self.iso)
args['isofilter'] = self.module.params.get('template_filter')
args['fetch_list'] = True
isos = self.query_api('listIsos', **args)
if isos:
for i in isos:
if iso in [i['displaytext'], i['name'], i['id']]:
self.iso = i
return self._get_by_key(key, self.iso)
self.module.fail_json(msg="ISO '%s' not found" % iso)
def get_instance(self):
instance = self.instance
if not instance:
instance_name = self.get_or_fallback('name', 'display_name')
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'fetch_list': True,
}
# Do not pass zoneid, as the instance name must be unique across zones.
instances = self.query_api('listVirtualMachines', **args)
if instances:
for v in instances:
if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.instance = v
break
return self.instance
def _get_instance_user_data(self, instance):
# Query the user data if we need to
if 'userdata' in instance:
return instance['userdata']
user_data = ""
if self.get_user_data() is not None and instance.get('id'):
res = self.query_api('getVirtualMachineUserData', virtualmachineid=instance['id'])
user_data = res['virtualmachineuserdata'].get('userdata', "")
return user_data
def get_iptonetwork_mappings(self):
network_mappings = self.module.params.get('ip_to_networks')
if network_mappings is None:
return
if network_mappings and self.module.params.get('networks'):
self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.")
network_names = [n['network'] for n in network_mappings]
ids = self.get_network_ids(network_names)
res = []
for i, data in enumerate(network_mappings):
res.append({'networkid': ids[i], 'ip': data['ip']})
return res
def get_ssh_keypair(self, key=None, name=None, fail_on_missing=True):
ssh_key_name = name or self.module.params.get('ssh_key')
if ssh_key_name is None:
return
args = {
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id'),
'name': ssh_key_name,
}
ssh_key_pairs = self.query_api('listSSHKeyPairs', **args)
if 'sshkeypair' in ssh_key_pairs:
return self._get_by_key(key=key, my_dict=ssh_key_pairs['sshkeypair'][0])
elif fail_on_missing:
self.module.fail_json(msg="SSH key not found: %s" % ssh_key_name)
def ssh_key_has_changed(self):
ssh_key_name = self.module.params.get('ssh_key')
if ssh_key_name is None:
return False
        # Fails if the keypair given as parameter does not exist
        param_ssh_key_fp = self.get_ssh_keypair(key='fingerprint')
        # CloudStack 4.5 does return a keypair on the instance for a non-existent key.
instance_ssh_key_name = self.instance.get('keypair')
if instance_ssh_key_name is None:
return True
# Get fingerprint for keypair of instance but do not fail if inexistent.
instance_ssh_key_fp = self.get_ssh_keypair(key='fingerprint', name=instance_ssh_key_name, fail_on_missing=False)
if not instance_ssh_key_fp:
return True
# Compare fingerprints to ensure the keypair changed
if instance_ssh_key_fp != param_ssh_key_fp:
return True
return False
def security_groups_has_changed(self):
security_groups = self.module.params.get('security_groups')
if security_groups is None:
return False
security_groups = [s.lower() for s in security_groups]
instance_security_groups = self.instance.get('securitygroup') or []
instance_security_group_names = []
for instance_security_group in instance_security_groups:
if instance_security_group['name'].lower() not in security_groups:
return True
else:
instance_security_group_names.append(instance_security_group['name'].lower())
for security_group in security_groups:
if security_group not in instance_security_group_names:
return True
return False
def get_network_ids(self, network_names=None):
if network_names is None:
network_names = self.module.params.get('networks')
if not network_names:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'fetch_list': True,
}
networks = self.query_api('listNetworks', **args)
if not networks:
self.module.fail_json(msg="No networks available")
network_ids = []
network_displaytexts = []
for network_name in network_names:
for n in networks:
if network_name in [n['displaytext'], n['name'], n['id']]:
network_ids.append(n['id'])
network_displaytexts.append(n['name'])
break
if len(network_ids) != len(network_names):
self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)
return network_ids
def present_instance(self, start_vm=True):
instance = self.get_instance()
if not instance:
instance = self.deploy_instance(start_vm=start_vm)
else:
instance = self.recover_instance(instance=instance)
instance = self.update_instance(instance=instance, start_vm=start_vm)
# In check mode, we do not necessarily have an instance
if instance:
instance = self.ensure_tags(resource=instance, resource_type='UserVm')
# refresh instance data
self.instance = instance
return instance
def get_user_data(self):
user_data = self.module.params.get('user_data')
if user_data is not None:
user_data = to_text(base64.b64encode(to_bytes(user_data)))
return user_data
def get_details(self):
details = self.module.params.get('details')
cpu = self.module.params.get('cpu')
cpu_speed = self.module.params.get('cpu_speed')
memory = self.module.params.get('memory')
        if all([cpu, cpu_speed, memory]):
            if details is None:
                details = {}
            # dict has no extends() method; use update() to merge the custom offering values
            details.update({
                'cpuNumber': cpu,
                'cpuSpeed': cpu_speed,
                'memory': memory,
            })
return details
def deploy_instance(self, start_vm=True):
self.result['changed'] = True
networkids = self.get_network_ids()
if networkids is not None:
networkids = ','.join(networkids)
args = {}
args['templateid'] = self.get_template_or_iso(key='id')
if not args['templateid']:
self.module.fail_json(msg="Template or ISO is required.")
args['zoneid'] = self.get_zone(key='id')
args['serviceofferingid'] = self.get_service_offering_id()
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['diskofferingid'] = self.get_disk_offering(key='id')
args['networkids'] = networkids
args['iptonetworklist'] = self.get_iptonetwork_mappings()
args['userdata'] = self.get_user_data()
args['keyboard'] = self.module.params.get('keyboard')
args['ipaddress'] = self.module.params.get('ip_address')
args['ip6address'] = self.module.params.get('ip6_address')
args['name'] = self.module.params.get('name')
args['displayname'] = self.get_or_fallback('display_name', 'name')
args['group'] = self.module.params.get('group')
args['keypair'] = self.get_ssh_keypair(key='name')
args['size'] = self.module.params.get('disk_size')
args['startvm'] = start_vm
args['rootdisksize'] = self.module.params.get('root_disk_size')
args['affinitygroupnames'] = self.module.params.get('affinity_groups')
args['details'] = self.get_details()
args['securitygroupnames'] = self.module.params.get('security_groups')
args['hostid'] = self.get_host_id()
template_iso = self.get_template_or_iso()
if 'hypervisor' not in template_iso:
args['hypervisor'] = self.get_hypervisor()
instance = None
if not self.module.check_mode:
instance = self.query_api('deployVirtualMachine', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
return instance
def update_instance(self, instance, start_vm=True):
# Service offering data
args_service_offering = {
'id': instance['id'],
}
if self.module.params.get('service_offering'):
args_service_offering['serviceofferingid'] = self.get_service_offering_id()
service_offering_changed = self.has_changed(args_service_offering, instance)
# Instance data
args_instance_update = {
'id': instance['id'],
'userdata': self.get_user_data(),
}
instance['userdata'] = self._get_instance_user_data(instance)
args_instance_update['ostypeid'] = self.get_os_type(key='id')
if self.module.params.get('group'):
args_instance_update['group'] = self.module.params.get('group')
if self.module.params.get('display_name'):
args_instance_update['displayname'] = self.module.params.get('display_name')
instance_changed = self.has_changed(args_instance_update, instance)
ssh_key_changed = self.ssh_key_has_changed()
security_groups_changed = self.security_groups_has_changed()
# Volume data
args_volume_update = {}
root_disk_size = self.module.params.get('root_disk_size')
root_disk_size_changed = False
if root_disk_size is not None:
res = self.query_api('listVolumes', type='ROOT', virtualmachineid=instance['id'])
[volume] = res['volume']
size = volume['size'] >> 30
args_volume_update['id'] = volume['id']
args_volume_update['size'] = root_disk_size
shrinkok = self.module.params.get('allow_root_disk_shrink')
if shrinkok:
args_volume_update['shrinkok'] = shrinkok
root_disk_size_changed = root_disk_size != size
changed = [
service_offering_changed,
instance_changed,
security_groups_changed,
ssh_key_changed,
root_disk_size_changed,
]
if any(changed):
force = self.module.params.get('force')
instance_state = instance['state'].lower()
if instance_state == 'stopped' or force:
self.result['changed'] = True
if not self.module.check_mode:
# Ensure VM has stopped
instance = self.stop_instance()
instance = self.poll_job(instance, 'virtualmachine')
self.instance = instance
# Change service offering
if service_offering_changed:
res = self.query_api('changeServiceForVirtualMachine', **args_service_offering)
instance = res['virtualmachine']
self.instance = instance
# Update VM
if instance_changed or security_groups_changed:
if security_groups_changed:
args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
res = self.query_api('updateVirtualMachine', **args_instance_update)
instance = res['virtualmachine']
self.instance = instance
# Reset SSH key
if ssh_key_changed:
# SSH key data
args_ssh_key = {}
args_ssh_key['id'] = instance['id']
args_ssh_key['projectid'] = self.get_project(key='id')
args_ssh_key['keypair'] = self.module.params.get('ssh_key')
instance = self.query_api('resetSSHKeyForVirtualMachine', **args_ssh_key)
instance = self.poll_job(instance, 'virtualmachine')
self.instance = instance
# Root disk size
if root_disk_size_changed:
async_result = self.query_api('resizeVolume', **args_volume_update)
self.poll_job(async_result, 'volume')
# Start VM again if it was running before
if instance_state == 'running' and start_vm:
instance = self.start_instance()
else:
self.module.warn("Changes won't be applied to running instances. "
"Use force=true to allow the instance %s to be stopped/started." % instance['name'])
# migrate to other host
host_changed = all([
instance['state'].lower() in ['starting', 'running'],
instance.get('hostname') is not None,
self.module.params.get('host') is not None,
self.module.params.get('host') != instance.get('hostname')
])
if host_changed:
self.result['changed'] = True
args_host = {
'virtualmachineid': instance['id'],
'hostid': self.get_host_id(),
}
if not self.module.check_mode:
res = self.query_api('migrateVirtualMachine', **args_host)
instance = self.poll_job(res, 'virtualmachine')
return instance
def recover_instance(self, instance):
if instance['state'].lower() in ['destroying', 'destroyed']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('recoverVirtualMachine', id=instance['id'])
instance = res['virtualmachine']
return instance
def absent_instance(self):
instance = self.get_instance()
if instance:
if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('destroyVirtualMachine', id=instance['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(res, 'virtualmachine')
return instance
def expunge_instance(self):
instance = self.get_instance()
if instance:
res = {}
if instance['state'].lower() in ['destroying', 'destroyed']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True)
elif instance['state'].lower() not in ['expunging']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True)
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self.poll_job(res, 'virtualmachine')
return instance
def stop_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['stopping', 'stopped']:
return instance
if instance['state'].lower() in ['starting', 'running']:
self.result['changed'] = True
if not self.module.check_mode:
instance = self.query_api('stopVirtualMachine', id=instance['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
return instance
def start_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['starting', 'running']:
return instance
if instance['state'].lower() in ['stopped', 'stopping']:
self.result['changed'] = True
if not self.module.check_mode:
args = {
'id': instance['id'],
'hostid': self.get_host_id(),
}
instance = self.query_api('startVirtualMachine', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
return instance
def restart_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['running', 'starting']:
self.result['changed'] = True
if not self.module.check_mode:
instance = self.query_api('rebootVirtualMachine', id=instance['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
elif instance['state'].lower() in ['stopping', 'stopped']:
instance = self.start_instance()
return instance
def restore_instance(self):
instance = self.get_instance()
self.result['changed'] = True
        # in check mode the instance may not be instantiated
if instance:
args = {}
args['templateid'] = self.get_template_or_iso(key='id')
args['virtualmachineid'] = instance['id']
res = self.query_api('restoreVirtualMachine', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(res, 'virtualmachine')
return instance
def get_result(self, instance):
super(AnsibleCloudStackInstance, self).get_result(instance)
if instance:
self.result['user_data'] = self._get_instance_user_data(instance)
if 'securitygroup' in instance:
security_groups = []
for securitygroup in instance['securitygroup']:
security_groups.append(securitygroup['name'])
self.result['security_groups'] = security_groups
if 'affinitygroup' in instance:
affinity_groups = []
for affinitygroup in instance['affinitygroup']:
affinity_groups.append(affinitygroup['name'])
self.result['affinity_groups'] = affinity_groups
if 'nic' in instance:
for nic in instance['nic']:
if nic['isdefault']:
if 'ipaddress' in nic:
self.result['default_ip'] = nic['ipaddress']
if 'ip6address' in nic:
self.result['default_ip6'] = nic['ip6address']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(),
display_name=dict(),
group=dict(),
state=dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'),
service_offering=dict(),
cpu=dict(type='int'),
cpu_speed=dict(type='int'),
memory=dict(type='int'),
template=dict(),
iso=dict(),
template_filter=dict(
default="executable",
aliases=['iso_filter'],
choices=['all', 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']
),
networks=dict(type='list', aliases=['network']),
ip_to_networks=dict(type='list', aliases=['ip_to_network']),
ip_address=dict(),
ip6_address=dict(),
disk_offering=dict(),
disk_size=dict(type='int'),
root_disk_size=dict(type='int'),
keyboard=dict(type='str', choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us']),
hypervisor=dict(),
host=dict(),
security_groups=dict(type='list', aliases=['security_group']),
affinity_groups=dict(type='list', aliases=['affinity_group']),
domain=dict(),
account=dict(),
project=dict(),
user_data=dict(),
zone=dict(),
ssh_key=dict(),
force=dict(type='bool', default=False),
tags=dict(type='list', aliases=['tag']),
details=dict(type='dict'),
poll_async=dict(type='bool', default=True),
allow_root_disk_shrink=dict(type='bool', default=False),
))
required_together = cs_required_together()
required_together.extend([
['cpu', 'cpu_speed', 'memory'],
])
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
required_one_of=(
['display_name', 'name'],
),
mutually_exclusive=(
['template', 'iso'],
),
supports_check_mode=True
)
acs_instance = AnsibleCloudStackInstance(module)
state = module.params.get('state')
if state in ['absent', 'destroyed']:
instance = acs_instance.absent_instance()
elif state in ['expunged']:
instance = acs_instance.expunge_instance()
elif state in ['restored']:
acs_instance.present_instance()
instance = acs_instance.restore_instance()
elif state in ['present', 'deployed']:
instance = acs_instance.present_instance()
elif state in ['stopped']:
acs_instance.present_instance(start_vm=False)
instance = acs_instance.stop_instance()
elif state in ['started']:
acs_instance.present_instance()
instance = acs_instance.start_instance()
elif state in ['restarted']:
acs_instance.present_instance()
instance = acs_instance.restart_instance()
if instance and 'state' in instance and instance['state'].lower() == 'error':
module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name'))
result = acs_instance.get_result(instance)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import sys
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import interpolate
from os import makedirs
from os.path import exists
from vampy import vamplot
from vampy import utils
plt.rcParams['axes.labelsize'] = 9
plt.rcParams['xtick.labelsize'] = 9
plt.rcParams['ytick.labelsize'] = 9
plt.rcParams['legend.fontsize'] = 9
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['Arial']
# NOTE: the plotting helpers below reference `self` and `Artery`; they appear to have been
# extracted from an Artery-like class, so `self` is taken here as an explicit first argument.
def spatial_plots(self, fig_dims, suffix, plot_dir, n):
rc, qc, Re = self.nondim
L = self.L * rc
nt = len(self.U[0,:,0])
x = np.linspace(0, L, self.nx)
skip = int(nt/n)+1
u = ['a', 'q', 'p']
l = ['cm^2', 'cm^3/s', 'mmHg']
positions = range(0,nt-1,skip)
for i in range(2):
y = self.U[i,positions,:]
fname = "%s/%s_%s%d_spatial.png" % (plot_dir, suffix, u[i], self.pos)
Artery.plot(fig_dims, suffix, plot_dir, x, y, positions, "cm", l[i],
fname)
    y = self.P[positions,:]  # pressure values (assumed already in mmHg)
fname = "%s/%s_%s%d_spatial.png" % (plot_dir, suffix, u[2], self.pos)
Artery.plot(fig_dims, suffix, plot_dir, x, y, positions, "cm", l[2],
fname)
def time_plots(self, fig_dims, suffix, plot_dir, n, time):
rc, qc, Re = self.nondim
time = time * rc**3 / qc
skip = int(self.nx/n)+1
u = ['a', 'q', 'p']
l = ['cm^2', 'cm^3/s', 'mmHg']
positions = range(0,self.nx-1,skip)
for i in range(2):
y = self.U[i,:,positions]
fname = "%s/%s_%s%d_time.png" % (plot_dir, suffix, u[i], self.pos)
Artery.plot(fig_dims, suffix, plot_dir, time, y, positions, "t", l[i],
fname)
y = np.transpose(self.P[:,positions])
fname = "%s/%s_%s%d_time.png" % (plot_dir, suffix, u[2], self.pos)
Artery.plot(fig_dims, suffix, plot_dir, time, y, positions, "t", l[2],
fname)
def pq_plot(self, fig_dims, suffix, plot_dir):
L = len(self.P[0,:])-1
positions = [0, int(L/4), int(L/2), int(3*L/4), L]
y = np.transpose(self.P[:,positions])
x = self.U[1,:,positions]
fname = "%s/%s_%s%d_pq.png" % (plot_dir, suffix, 'pq', self.pos)
    plt.figure(figsize=fig_dims)  # fig_dims is a [width, height] pair in inches
labels = ['0', 'L/4', 'L/2', '3L/4', 'L']
for i in range(len(y[:,0])):
plt.plot(x[i,:], y[i,:], lw=1, color='k')
plt.xlabel('flux (cm^3/s)')
plt.ylabel('pressure (mmHg)')
plt.savefig(fname, dpi=600, bbox_inches='tight')
def plot(fig_dims, suffix, plot_dir, x, y, labels, xlabel, ylabel, fname):
colours = ['#377eb8', '#4daf4a', '#984ea3', '#d95f02']
    plt.figure(figsize=fig_dims)
s = y.shape
n = min(s)
for i in range(n):
plt.plot(x, y[i,:], label="%d" % (labels[i]), lw=2, color=colours[i])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim([min(x), max(x)])
plt.legend()
plt.savefig(fname, dpi=600, bbox_inches='tight')
def main(param):
# read config file
f, a, s = utils.read_config(param)
data_dir = f['data_dir']
plot_dir = f['plot_dir']
suffix = f['run_id']
T = s['T']
tc = s['tc']
tf = T*tc
if not exists("%s/%s" % (plot_dir, suffix)):
makedirs("%s/%s" % (plot_dir, suffix))
pos = 0
if type(a['Ru']) is float:
L = a['Ru']*a['lam']
else:
L = a['Ru'][pos]*a['lam'][pos]
P = np.loadtxt("%s/%s/p%d_%s.csv" % (data_dir, suffix, pos, suffix), delimiter=',')
U = np.loadtxt("%s/%s/u%d_%s.csv" % (data_dir, suffix, pos, suffix), delimiter=',')
t = np.linspace(tf-T, tf, P.shape[1])
x = np.linspace(0,L,P.shape[0])
f = interpolate.interp2d(t, x, P, kind='linear')
g = interpolate.interp2d(t, x, U, kind='linear')
x = np.linspace(0, L, len(t))
P = f(t, x)
U = g(t, x)
WIDTH = 510 # the number latex spits out
FACTOR = 1.0 # the fraction of the width you'd like the figure to occupy
fig_width_pt = WIDTH * FACTOR
inches_per_pt = 1.0 / 72.27
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
fig_width_in = fig_width_pt * inches_per_pt # figure width in inches
fig_height_in = fig_width_in * golden_ratio # figure height in inches
fig_dims = [fig_width_in, fig_height_in] # fig dims as a list
vamplot.p3d_plot(fig_dims, suffix, plot_dir, t, P, L, pos)
vamplot.q3d_plot(fig_dims, suffix, plot_dir, t, U, L, pos)
if __name__ == "__main__":
script, param = sys.argv
main(param)
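
# --- Illustrative invocation (not part of the original script) ---
# The script expects a single positional argument: the path to a config file readable by
# utils.read_config (the file name below is hypothetical), e.g.
#     python plot_results.py bifurcation.cfg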
|
import os
import numpy as np
from six.moves import cPickle
from tensorflow import keras
import helper
from tfomics import utils, explain
#------------------------------------------------------------------------
num_trials = 10
model_names = ['cnn-dist', 'cnn-local']
activations = ['relu', 'exponential', 'sigmoid', 'tanh', 'softplus', 'linear', 'elu']
results_path = os.path.join('../results', 'task3')
params_path = os.path.join(results_path, 'model_params')
save_path = utils.make_directory(results_path, 'scores')
#------------------------------------------------------------------------
# load data
data_path = '../data/synthetic_code_dataset.h5'
data = helper.load_data(data_path)
x_train, y_train, x_valid, y_valid, x_test, y_test = data
# load ground truth values
test_model = helper.load_synthetic_models(data_path, dataset='test')
true_index = np.where(y_test[:,0] == 1)[0]
X = x_test[true_index][:500]
X_model = test_model[true_index][:500]
#------------------------------------------------------------------------
for model_name in model_names:
for activation in activations:
saliency_scores = []
mut_scores = []
integrated_scores = []
shap_scores = []
for trial in range(num_trials):
keras.backend.clear_session()
# load model
model = helper.load_model(model_name, activation=activation)
name = model_name+'_'+activation+'_'+str(trial)
print('model: ' + name)
# compile model
helper.compile_model(model)
            # load the trained weights for this trial
weights_path = os.path.join(params_path, name+'.hdf5')
model.load_weights(weights_path)
# interpretability performance with saliency maps
print('saliency maps')
saliency_scores.append(explain.saliency(model, X, class_index=0, layer=-1))
# interpretability performance with mutagenesis
print('mutagenesis maps')
mut_scores.append(explain.mutagenesis(model, X, class_index=0, layer=-1))
# interpretability performance with integrated gradients
print('integrated gradients maps')
integrated_scores.append(explain.integrated_grad(model, X, class_index=0, layer=-1,
num_background=10, num_steps=20,
reference='shuffle'))
# interpretability performance with deepshap
print('shap maps')
shap_scores.append(explain.deepshap(model, X, class_index=0,
num_background=10, reference='shuffle'))
# save results
file_path = os.path.join(save_path, model_name+'_'+activation+'.pickle')
with open(file_path, 'wb') as f:
cPickle.dump(np.array(saliency_scores), f, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(np.array(mut_scores), f, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(np.array(integrated_scores), f, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(np.array(shap_scores), f, protocol=cPickle.HIGHEST_PROTOCOL)
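
# --- Illustrative sketch (not part of the original script) ---
# How the pickled score arrays could be read back later; the file name below is just an
# example and must match one produced by the loops above.
#
# with open(os.path.join(save_path, 'cnn-dist_relu.pickle'), 'rb') as f:
#     saliency_scores = cPickle.load(f)      # (num_trials, ...) array
#     mut_scores = cPickle.load(f)
#     integrated_scores = cPickle.load(f)
#     shap_scores = cPickle.load(f)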
|
from .. import base  # pylint: disable=unused-import
class BaseAliyunApi:
""" Aliyun base class"""
def __init__(self, client=None):
self.client = client # type: base.BaseAliyunClient
|
import yaml
import os.path
from kubernetes import client, config, utils
#############################################################################################
# Following prints a list of all SAS models both active/running and inactive/ready_for_scoring
#
def list_knative_models(kubeconfig_path,k8s_namespace, knative_scoring_ingress_host):
list_nodes, list_pods, list_ksvcs = True, True, True
try:
if os.path.exists(kubeconfig_path):
config.load_kube_config(config_file=kubeconfig_path)
else:
print("kubeconfig file not found")
return
v1 = client.CoreV1Api()
if list_nodes:
print("======================")
print("List of nodes in cluster: ")
res = v1.list_node()
#print("result type: ", type(res), type(res.items))
for i in res.items:
print(i.metadata.name, i.status.conditions[-1].type)
if list_pods:
print("======================")
print("List of Running pods/Active models scoring currently in cluster: ")
res = v1.list_namespaced_pod(namespace=k8s_namespace)
#print("result type: ", type(res), type(res.items))
for i in res.items:
print(i.metadata.name, i.status.phase)
if list_ksvcs:
print("======================")
print("List of knative svcs/Ready models for scoring in cluster: ")
print("Model Name, Model deployed date, Model Generation and Container Image Path and ScoringURL")
api = client.CustomObjectsApi()
res = api.list_namespaced_custom_object(group="serving.knative.dev",
version="v1",
plural="services",
namespace=k8s_namespace)
#print("result type: ", type(res), type(res['items']))
for i in res['items']:
#print("i", i)
if i['status']['conditions'][0]['status'] == 'True':
print(i['metadata']['name'], ",", i['metadata']['creationTimestamp'],",", i['metadata']['generation'],",",
i['spec']['template']['spec']['containers'][0]['image'],",", "ScoringURL", knative_scoring_ingress_host+i['metadata']['name']+"/")
else:
print(i['metadata']['name'], ",", i['metadata']['creationTimestamp'], ",",
i['metadata']['generation'], ",",
i['spec']['template']['spec']['containers'][0]['image'], ",", "Service not ready. Debug ksvc with k8s admin if issue persists more than 5-10 min")
except Exception as e:
print(str(e))
raise e
###########################################################################################################
#### Following deploys a knative service
##
def deploy_model_to_knative(kubeconfig_path, k8s_namespace, knative_scoring_ingress_host, knative_scoring_ingress_hostname, model_name, model_imagepath,model_timeout):
import re
sas_model_knative = {
"apiVersion": "serving.knative.dev/v1",
"kind": "Service",
"metadata" : {
"name": model_name,
},
"spec": {
"template": {
"metadata": {
"annotations": {
"autoscaling.knative.dev/window": model_timeout
}
},
"spec": {
"containers": [{
"image": model_imagepath,
}]
}
}
}
}
if not re.match("^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{,63}(?<!-)$",model_name):
print(model_name, "name should not have underscores etc and should match to a DNS Label convention")
return
try:
if os.path.exists(kubeconfig_path):
config.load_kube_config(config_file=kubeconfig_path)
else:
print("kubeconfig file not found")
return
v1 = client.CoreV1Api()
apps_v1 = client.AppsV1Api()
api = client.CustomObjectsApi()
resource = api.create_namespaced_custom_object(
group="serving.knative.dev",
version="v1",
plural="services",
namespace=k8s_namespace,
body=sas_model_knative)
print("model deployed successfully. Can be inferenced at ",knative_scoring_ingress_host+model_imagepath.split('/')[-1])
print("Since are using load balancer we need to put in Host in header info like this: ","-HHost:"+model_name+"."+k8s_namespace+"."+knative_scoring_ingress_hostname )
except Exception as e:
print(str(e))
raise e
##########################################################
# Delete the SAS model from knative setup..
def delete_model_knative(kubeconfig_path, k8s_namespace, model_name):
try:
config.load_kube_config(config_file=kubeconfig_path)
v1 = client.CoreV1Api()
api = client.CustomObjectsApi()
resource = api.delete_namespaced_custom_object(
group="serving.knative.dev",
version="v1",
name = model_name,
plural="services",
namespace=k8s_namespace,
body=client.V1DeleteOptions())
print(model_name, "Model deleted from kubernetes/knative. Container Image is not aletered. You can always redeploy")
except Exception as e:
print(str(e))
raise e
##################################################################################################
############################################################################################
####################
# Following prints a list of all revisions for a given model
#
def list_knative_revisions(kubeconfig_path,k8s_namespace, knative_scoring_ingress_host, model_name):
try:
if os.path.exists(kubeconfig_path):
config.load_kube_config(config_file=kubeconfig_path)
else:
print("kubeconfig file not found")
return
v1 = client.CoreV1Api()
print("======================")
print("List of revisions for model: ", model_name)
print("Model Revision, Ready, Generation, Container Image Path")
api = client.CustomObjectsApi()
res = api.list_namespaced_custom_object(group="serving.knative.dev",
version="v1",
plural="revisions",
namespace=k8s_namespace)
for i in res['items']:
if i['metadata']['labels']['serving.knative.dev/configuration'] == model_name:
#print("i: ", i)
print(i['metadata']['name'],",", i['status']['conditions'][-1]['status'], ",", i['metadata']['labels']['serving.knative.dev/configurationGeneration'],
",", i['spec']['containers'][0]['image'] )
print("======================")
print("Split of traffic by revision")
res = api.list_namespaced_custom_object(group="serving.knative.dev",
version="v1",
plural="services",
namespace=k8s_namespace)
for i in res['items']:
if i['metadata']['name'] == model_name:
traffic_list = i['spec']['traffic']
for traffic in traffic_list:
print("traffic: ", traffic)
#if traffic['revisionName']:
# print(traffic['revisionName'],",",traffic['percent'],",",traffic['tag'])
#else:
# print("traffic: ", traffic)
except Exception as e:
print(str(e))
raise e
##
def deploy_revision_to_knative(kubeconfig_path, k8s_namespace, model_name, blue_revision_name, blue_percent, green_revision_name, green_percent, green_revision_imagepath, model_timeout):
import re
sas_revision_knative = {
"apiVersion": "serving.knative.dev/v1",
"kind": "Service",
"metadata" : {
"name": model_name,
},
"spec": {
"template": {
"metadata": {
"name": green_revision_name,
"annotations": {
"autoscaling.knative.dev/window": model_timeout
}
},
"spec": {
"containers": [{
"image": green_revision_imagepath,
}]
}
},
"traffic": [
{
"tag": "blue",
"revisionName": blue_revision_name,
"percent": blue_percent
},
{
"tag": "green",
"revisionName": green_revision_name,
"percent": green_percent
}
]
}
}
if not re.match("^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{,63}(?<!-)$",model_name):
print(green_revision_name, "name should not have underscores etc and should match to a DNS Label convention")
return
if not model_name in green_revision_name:
print("revision name ",green_revision_name,"should have prefix of model name ", model_name)
return
try:
if os.path.exists(kubeconfig_path):
config.load_kube_config(config_file=kubeconfig_path)
else:
print("kubeconfig file not found")
return
v1 = client.CoreV1Api()
apps_v1 = client.AppsV1Api()
api = client.CustomObjectsApi()
resource = api.patch_namespaced_custom_object(
group="serving.knative.dev",
version="v1",
plural="services",
name=model_name,
namespace=k8s_namespace,
body=sas_revision_knative)
print("Revision deployed. All revisions reflect same model and URL REST API do not change")
except Exception as e:
print(str(e))
raise e
# Delete the SAS model revision from knative setup..
def delete_revision_knative(kubeconfig_path, k8s_namespace, blue_revision_name):
try:
config.load_kube_config(config_file=kubeconfig_path)
v1 = client.CoreV1Api()
api = client.CustomObjectsApi()
resource = api.delete_namespaced_custom_object(
group="serving.knative.dev",
version="v1",
name = blue_revision_name,
plural="revisions",
namespace=k8s_namespace,
body=client.V1DeleteOptions())
print(blue_revision_name," Revision deleted from kubernetes/knative.")
except Exception as e:
print(str(e))
raise e
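
# --- Illustrative usage sketch (not part of the original module) ---
# Example calls with hypothetical paths and names; adjust them to your cluster.
#
# kubeconfig = "/home/user/.kube/config"
# namespace = "sas-models"
# ingress_host = "http://models.example.com/"
# list_knative_models(kubeconfig, namespace, ingress_host)
# deploy_model_to_knative(kubeconfig, namespace, ingress_host, "example.com",
#                         model_name="churn-model",
#                         model_imagepath="registry.example.com/sas/churn-model:1.0",
#                         model_timeout="300s")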
|
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2019-11-30 16:32:47
# @Last Modified by: 何睿
# @Last Modified time: 2019-11-30 16:44:05
from collections import defaultdict
class Solution:
def characterReplacement(self, s: str, k: int) -> int:
window_count = defaultdict(int)
res, left, right, count_s = 0, 0, 0, len(s)
max_repeat_count = 0
        while right < count_s:
            window_count[s[right]] += 1
            # count of the most frequent character seen in any window so far
            max_repeat_count = max(max_repeat_count, window_count[s[right]])
            # shrink the window while it cannot be fixed with at most k replacements
            while right - left + 1 - max_repeat_count > k:
                window_count[s[left]] -= 1
                left += 1
            res = max(res, right - left + 1)
            right += 1
return res
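
# --- Illustrative check (not part of the original solution) ---
# Examples from LeetCode 424 ("Longest Repeating Character Replacement"): with k
# replacements allowed, the length of the longest uniform window is returned.
if __name__ == "__main__":
    assert Solution().characterReplacement("ABAB", 2) == 4
    assert Solution().characterReplacement("AABABBA", 1) == 4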
|
import os
import subprocess
dir_path = os.path.dirname(os.path.realpath(__file__))
def run(redis_url=None):
env = os.environ.copy()
if redis_url is not None:
env['REDIS_URI'] = redis_url
return subprocess.Popen(['python', os.path.join(dir_path, 'extractor.py')], env=env)
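
# --- Illustrative usage (not part of the original module) ---
# The Redis URL below is an example; extractor.py is assumed to read it from REDIS_URI.
#
# proc = run("redis://localhost:6379/0")
# ...
# proc.terminate()
# proc.wait()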
|
import torch.nn as nn
class FeatureExtractor(nn.Module):
"""Extracts feature vectors from VGG-19"""
    def __init__(self, model, i, j):
        super().__init__()
        # positions of the max-pool layers in torchvision's vgg19.features
        maxpool = [4, 9, 18, 27, 36]
        # truncate the network shortly before the i-th max-pool (phi_{i,j}-style selection)
        layer = maxpool[i-1]-2*j
        self.features = nn.Sequential(*list(model.features.children())[:(layer+1)])
def forward(self, x):
return self.features(x)
class ResidualBlock(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(64, 64, 3, 1, 1)
self.bn1 = nn.BatchNorm2d(64)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(64, 64, 3, 1, 1)
self.bn2 = nn.BatchNorm2d(64)
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = self.relu1(y)
y = self.conv2(y)
y = self.bn2(y)
return x + y
class UpsampleBlock(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(64, 256, 3, 1, 1)
self.shuffle = nn.PixelShuffle(2)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.shuffle(x)
x = self.relu(x)
return x
class Generator(nn.Module):
def __init__(self, b):
super().__init__()
self.b = b
self.conv1 = nn.Conv2d(3, 64, 3, 1, 1)
self.relu = nn.ReLU()
for i in range(b):
self.add_module(f'ResidualBlock_{i+1}', ResidualBlock())
self.conv2 = nn.Conv2d(64, 64, 3, 1, 1)
self.bn = nn.BatchNorm2d(64)
for i in range(2):
self.add_module(f'UpsampleBlock_{i+1}', UpsampleBlock())
self.conv3 = nn.Conv2d(64, 3, 9, 1, 4)
def forward(self, x):
x = self.relu(self.conv1(x))
y = x.clone()
for i in range(self.b):
y = self.__getattr__(f'ResidualBlock_{i+1}')(y)
y = self.conv2(y)
y = self.bn(y)
y = y + x
for i in range(2):
y = self.__getattr__(f'UpsampleBlock_{i+1}')(y)
y = self.conv3(y)
return y
class DiscriminatorBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride, 1)
self.leaky_relu = nn.LeakyReLU(0.2)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.leaky_relu(x)
x = self.bn(x)
return x
class Discriminator(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 64, 3, 1, 1)
self.leaky_relu = nn.LeakyReLU(0.2)
self.add_module('DiscriminatorBlock1', DiscriminatorBlock(64, 64, 2))
n = 128
for i in range(3):
self.add_module(f'DiscriminatorBlock{2+2*i}', DiscriminatorBlock(n//2, n, 1))
self.add_module(f'DiscriminatorBlock{3+2*i}', DiscriminatorBlock(n, n, 2))
n *= 2
self.fc1 = nn.Linear(512*6*6, 1024)
self.fc2 = nn.Linear(1024, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.leaky_relu(self.conv1(x))
for i in range(7):
x = self.__getattr__(f'DiscriminatorBlock{i+1}')(x)
x = x.view(-1,512*6*6)
x = self.leaky_relu(self.fc1(x))
x = self.sigmoid(self.fc2(x))
x = x.view(-1,1)
return x
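# Shape sanity check (a sketch, not part of the original training pipeline):
# the generator upsamples 4x via two PixelShuffle(2) blocks, and the
# discriminator's fully-connected head assumes 96x96 inputs (512 * 6 * 6).
if __name__ == '__main__':
    import torch
    lr_image = torch.randn(1, 3, 24, 24)
    generator = Generator(b=16)      # 16 residual blocks, as in the SRGAN paper
    discriminator = Discriminator()
    sr_image = generator(lr_image)   # -> (1, 3, 96, 96)
    score = discriminator(sr_image)  # -> (1, 1), probability of being "real"
    print(sr_image.shape, score.shape)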
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import socket
import datetime
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    f = open('udp', 'w')
try:
server_address = ('0.0.0.0', 10000)
sock.bind(server_address)
f.writelines('Watching %s:%s\n' % server_address)
f.flush()
while True:
data, address = sock.recvfrom(4096)
f.writelines(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
f.writelines("-- %s>>%s\n" % (address,data))
f.flush()
            if data:
                # Sockets require bytes payloads on Python 3.
                sock.sendto(b"echo", address)
                sock.sendto(data, address)
                sock.sendto(b"exit", address)
    finally:
        f.close()
        sock.close()
if __name__ == '__main__':
main()
|
import torch
####################################################################################################################################################
# Context-aware loss function
####################################################################################################################################################
class ContextAwareLoss(torch.nn.Module):
def __init__(self, K, hit_radius = 0.1, miss_radius = 0.9):
super(ContextAwareLoss,self).__init__()
self.K = K
self.hit_radius = float(hit_radius)
self.miss_radius = float(miss_radius)
def forward(self, gt_label, pred_score):
K = self.K
hit_radius = self.hit_radius
miss_radius = self.miss_radius
zeros = torch.zeros(pred_score.size()).to(pred_score.device).type(torch.float)
pred_score = 1.-pred_score
case1 = self.DownStep(gt_label, K[0]) * torch.max(zeros, - torch.log(pred_score) + torch.log(zeros + miss_radius))
case2 = self.Interval(gt_label, K[0], K[1]) * torch.max(zeros, - torch.log(pred_score + (1.-pred_score)*(self.PartialIdentity(gt_label,K[0],K[1])-K[0])/(K[1]-K[0])) + torch.log(zeros + miss_radius))
case3 = self.Interval(gt_label, K[1], 0.) * zeros
case4 = self.Interval(gt_label, 0., K[2]) * torch.max(zeros, - torch.log(1.-pred_score + pred_score*(self.PartialIdentity(gt_label,0.,K[2])-0.)/(K[2]-0.)) + torch.log(zeros + 1.-hit_radius))
case5 = self.Interval(gt_label, K[2], K[3]) * torch.max(zeros, - torch.log(pred_score + (1.-pred_score)*(self.PartialIdentity(gt_label,K[2],K[3])-K[3])/(K[2]-K[3])) + torch.log(zeros + miss_radius))
case6 = self.UpStep(gt_label, K[3]) * torch.max(zeros, - torch.log(pred_score) + torch.log(zeros + miss_radius))
L = case1 + case2 + case3 + case4 + case5 + case6
return torch.sum(L)
def UpStep(self,x,a): #0 if x<a, 1 if x >= a
return 1.-torch.max(0.*x,torch.sign(a-x))
def DownStep(self,x,a): #1 if x < a, 0 if x >=a
return torch.max(0.*x,torch.sign(a-x))
def Interval(self,x,a,b): # 1 if a<= x < b, 0 otherwise
return self.UpStep(x,a) * self.DownStep(x,b)
def PartialIdentity(self,x,a,b):#a if x<a, x if a<= x <b, b if x >= b
return torch.min(torch.max(x,0.*x+a),0.*x+b)
####################################################################################################################################################
# Spotting loss
####################################################################################################################################################
class SpottingLoss(torch.nn.Module):
def __init__(self, lambda_coord, lambda_noobj):
super(SpottingLoss,self).__init__()
self.lambda_coord = lambda_coord
self.lambda_noobj = lambda_noobj
def forward(self,y_true, y_pred):
y_pred = self.permute_ypred_for_matching(y_true,y_pred)
loss = torch.sum(y_true[:,:,0]*self.lambda_coord*torch.square(y_true[:,:,1]-y_pred[:,:,1]) + y_true[:,:,0]*torch.square(y_true[:,:,0]-y_pred[:,:,0]) + (1-y_true[:,:,0])*self.lambda_noobj*torch.square(y_true[:,:,0]-y_pred[:,:,0]) + y_true[:,:,0]*torch.sum(torch.square(y_true[:,:,2:]-y_pred[:,:,2:]),axis=-1)) #-y_true[:,:,0]*torch.sum(y_true[:,:,2:]*torch.log(y_pred[:,:,2:]),axis=-1)
return loss
def permute_ypred_for_matching(self, y_true, y_pred):
alpha = y_true[:,:,0]
x = y_true[:,:,1]
p = y_pred[:,:,1]
nb_pred = x.shape[-1]
D = torch.abs(x.unsqueeze(-1).repeat(1,1,nb_pred) - p.unsqueeze(-2).repeat(1,nb_pred,1))
D1 = 1-D
Permut = 0*D
alpha_filter = alpha.unsqueeze(-1).repeat(1,1,nb_pred)
v_filter = alpha_filter
h_filter = 0*v_filter + 1
D2 = v_filter * D1
for i in range(nb_pred):
D2 = v_filter * D2
D2 = h_filter * D2
A = torch.nn.functional.one_hot(torch.argmax(D2,axis=-1),nb_pred)
B = v_filter * A * D2
C = torch.nn.functional.one_hot(torch.argmax(B,axis=-2),nb_pred).permute(0, 2, 1)
E = v_filter * A * C
Permut = Permut + E
v_filter = (1-torch.sum(Permut,axis=-1))*alpha
v_filter = v_filter.unsqueeze(-1).repeat(1,1,nb_pred)
h_filter = 1-torch.sum(Permut, axis=-2)
h_filter = h_filter.unsqueeze(-2).repeat(1,nb_pred,1)
v_filter = 1-alpha_filter
D2 = v_filter * D1
D2 = h_filter * D2
for i in range(nb_pred):
D2 = v_filter * D2
D2 = h_filter * D2
A = torch.nn.functional.one_hot(torch.argmax(D2,axis=-1),nb_pred)
B = v_filter * A * D2
C = torch.nn.functional.one_hot(torch.argmax(B,axis=-2),nb_pred).permute(0, 2, 1)
E = v_filter * A * C
Permut = Permut + E
v_filter = (1-torch.sum(Permut,axis=-1))*(1-alpha) #here comes the change
v_filter = v_filter.unsqueeze(-1).repeat(1,1,nb_pred)
h_filter = 1-torch.sum(Permut, axis=-2)
h_filter = h_filter.unsqueeze(-2).repeat(1,nb_pred,1)
permutation = torch.argmax(Permut,axis=-1)
permuted = torch.gather(y_pred, 1, permutation.unsqueeze(-1).repeat(1,1,y_true.shape[-1]))
return permuted
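# Minimal sketch with made-up numbers: gt_label holds the signed temporal
# offset of each frame to its closest event and pred_score the detection
# confidence in (0, 1). The transition points K below are placeholders, not
# values taken from the original experiments.
if __name__ == '__main__':
    K = [-4.0, -2.0, 2.0, 4.0]
    criterion = ContextAwareLoss(K)
    gt_label = torch.linspace(-6.0, 6.0, steps=13)
    pred_score = torch.full_like(gt_label, 0.5)
    print(criterion(gt_label, pred_score))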
|
'''
An unnecessarily complex implementation of a heap data structure.
'''
from .binary_tree import BinaryNode, BinaryTree
from operator import le, ge
def _comparisonFactory(comparisonFunction):
def compareNodes(node1, node2):
if node1 is None:
return node2
if node2 is None:
return node1
if comparisonFunction(node1.key, node2.key):
return node1
else:
return node2
return compareNodes
_max = _comparisonFactory(ge)
_min = _comparisonFactory(le)
def _swap(node1, node2):
tree = node1.tree
node1.index, node2.index = node2.index, node1.index
tree._array[node1.index] = node1
tree._array[node2.index] = node2
class HeapNode(BinaryNode):
r'''A node of a heap tree.
The node wraps a value (node.key) and has convenience methods
for accessing node.left, node.right, and node.parent.
'''
def __init__(self, key = None, tree = None):
self.index = None
super().__init__(key = key, tree = tree)
@property
def left(self):
if self.index is None:
return None
leftIndex = 2 * self.index
if leftIndex <= len(self.tree):
return self.tree[leftIndex]
else:
return None
@left.setter
def left(self, node): # Setters do not check to make sure node is a HeapNode
if self.index is None:
return None
leftIndex = 2 * self.index
if leftIndex <= len(self.tree):
self.tree[leftIndex] = node
node.index = leftIndex
node.tree = self.tree
else:
raise IndexError('Node has no left child.')
@property
def right(self):
if self.index is None:
return None
rightIndex = 2 * self.index + 1
if rightIndex <= len(self.tree):
return self.tree[rightIndex]
else:
return None
@right.setter
def right(self, node):
if self.index is None:
return None
rightIndex = 2 * self.index + 1
if rightIndex <= len(self.tree):
self.tree[rightIndex] = node
node.index = rightIndex
node.tree = self.tree
else:
raise IndexError('Node has no right child.')
@property
def parent(self):
if (self.index is not None) and (self.index != 1):
return self.tree[self.index // 2]
else:
return None
@parent.setter
def parent(self, node):
if (self.index is not None) and (self.index != 1):
self.tree[self.index // 2] = node
node.index = self.index // 2
node.tree = self.tree
else:
            raise IndexError('Node is root of the tree and does not have a parent.')
def rotateLeft(self):
raise NotImplementedError('HeapNode.rotateLeft')
def rotateRight(self):
raise NotImplementedError('HeapNode.rotateRight')
def checkHeapProperty(self):
assert self.tree.compare(self, self.left) is self, f"Subtree rooted at key = {self.key} is not a heap."
assert self.tree.compare(self, self.right) is self, f"Subtree rooted at key = {self.key} is not a heap."
if self.left is not None:
assert self.tree is self.left.tree, f"Node (key = {self.key}) has a different tree attribute from its left child."
self.left.checkHeapProperty()
if self.right is not None:
assert self.tree is self.right.tree, f"Node (key = {self.key}) has a different tree attribute from its right child."
self.right.checkHeapProperty()
def heapify(self):
while True:
moreHeapishChild = self.tree.compare(self.left, self.right)
if moreHeapishChild is None:
return
moreHeapishNode = self.tree.compare(self, moreHeapishChild)
if moreHeapishNode is moreHeapishChild:
_swap(self, moreHeapishChild)
else:
return
siftDown = heapify
def siftUp(self):
while True:
if self.isRoot():
return
moreHeapishNode = self.tree.compare(self, self.parent)
if moreHeapishNode is self:
_swap(self, self.parent)
else:
return
def delete(self):
if self.tree is None:
raise Exception('Node is not part of a heap.')
lastNode = self.tree[-1]
if self is lastNode:
self.tree.pop()
return self
_swap(self, lastNode)
self.tree.pop()
if (lastNode.parent is None) or (lastNode.tree.compare(lastNode, lastNode.parent) is lastNode.parent):
lastNode.siftDown()
elif lastNode.tree.compare(lastNode, lastNode.parent) is lastNode:
lastNode.siftUp()
else:
raise Exception('Unknown exception in HeapNode.delete()')
return self
class Heap(BinaryTree):
r'''Heap data structure
Heap(type = 'max', fromList = None)
Creates a heap from a list of values.
Args:
type (string): 'max' or 'min' for a max-heap or a min-heap, respectively
fromList (list): iterable of objects that can be compared, or a list of BinaryNode objects
Example:
>>> print(heap.Heap(fromList = range(10)))
┌───9──┐
┌──8──┐ ┌─6─┐
┌─7─┐ ┌─4 5 2
0 3 1
'''
def __init__(self, NodeType = HeapNode, type = 'max', fromList = None):
# self._array = [None, None]
self._array = []
super().__init__(NodeType = NodeType)
self.changeType(type, buildHeapFromList = fromList)
def buildHeap(self, iterable):
        r'''Build or extend the heap from an iterable of values.
        buildHeap(iterable)
        Inserts each value as a new node and restores the heap property
        bottom-up. If the heap already contains nodes, it is extended with
        the new values.
        Args:
            iterable: iterable of objects that can be compared, or a list of BinaryNode objects.
        '''
# if iterable is None:
# raise IndexError('Passed None to Heap.buildHeap')
# self._array.pop()
current_index = len(self._array)
for elem in iterable:
node = HeapNode(elem) # Doesn't check for invalid elem
node.index = current_index
node.tree = self
self._array.append(node) # Doesn't check if node is already in self._array
current_index += 1
for idx in range(len(self) // 2, 0, -1):
self._array[idx].heapify()
extend = buildHeap
def __len__(self):
return len(self._array) - 1
def __getitem__(self, index):
return self._array[index]
def __setitem__(self, index, value):
self._array[index] = value
@property
def root(self):
if len(self) < 1:
return None
return self._array[1]
@root.setter
def root(self, node): # Maybe should forbid appending None to self._array...
if len(self._array) == 0:
self._array.append(None)
elif len(self._array) == 1:
self._array.append(node)
node.tree = self
else:
self._array[1] = node
node.tree = self
def checkHeapProperty(self):
'''Check that the rep invariant of the heap is preserved by
recursively checking the left and right children of each node.'''
self.root.checkHeapProperty()
def changeType(self, type, buildHeapFromList = None):
'''Change the heap type from a max-heap to a min-heap or vice-versa.
self.changeType(type, buildHeapFromList = None)
Args:
type (string): 'max' or 'min' for a max-heap or a min-heap, respectively.
buildHeapfromList (list): iterable of objects that can be compared, or a list
of BinaryNode objects. After changing the heap type, the heap is extended
by adding values from buildHeapFromList.
'''
if buildHeapFromList is None:
buildHeapFromList = []
if type.lower() == 'max':
self.compare = _max
elif type.lower() == 'min':
self.compare = _min
else:
raise Exception(f'Unknown comparison type {type} in Heap.changeType(type)')
self.type = type
self.buildHeap(buildHeapFromList)
def peek(self):
'''Peek at the root node'''
return self.root
def pop(self):
if len(self._array) <= 1:
return None
lastNode = self._array.pop()
lastNode.tree = None
return lastNode
def extract(self):
'''Extract the root node'''
if len(self) < 1:
return None
_swap(self._array[1], self._array[-1])
extractedNode = self.pop()
if len(self) > 0:
self._array[1].heapify()
return extractedNode
def sortedList_(self):
r'''Empties the heap and returns a sorted list of HeapNode objects.
Args: N/A
Return:
List of HeapNode objects, sorted from least to greatest if min-heap and
greatest to least if max-heap.
Example:
>>> [node.key for node in heap.Heap(fromList = range(10)).sortedList_()]
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
'''
nodeList = []
current = self.extract()
while current is not None:
nodeList.append(current)
current = self.extract()
return nodeList
def insert(self, key):
if key is None:
return None
newNode = self.NodeType(key)
newNode.tree = self
newNode.index = len(self) + 1
self._array.append(newNode)
newNode.siftUp()
return newNode
|
import pandas as pd
import numpy as np
import re
df = pd.read_csv('csv/location.csv')
df['lat_lon'] = df['location'].apply(lambda x: re.findall(r'(\d{2}\.\d{10}),(\d{2}\.\d{10})', x, re.DOTALL))
print(type(df['lat_lon'][0][0]))
df.drop(686, axis=0, inplace=True)
df['lat'] = [x[0][0] for x in df['lat_lon']]
df['lon'] = [x[0][1] for x in df['lat_lon']]
print(df.info())
df = df[['names', 'location', 'lat', 'lon']]
df.to_csv('csv/lat_lon.csv', index=False)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
setup.py
A module that installs reporter as a module
"""
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup
setup(
name='reporter',
version='1.0.0',
license='MIT',
description='Reports AGOL usage statistics.',
author='AGRC',
author_email='agrc@utah.gov',
url='https://github.com/agrc/reporter',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=True,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Utilities',
],
project_urls={
'Issue Tracker': 'https://github.com/agrc/reporter/issues',
},
keywords=['gis'],
install_requires=[],
extras_require={
'tests': [
'pylint-quotes==0.2.*',
'pylint==2.5.*',
'pytest-cov==2.9.*',
'pytest-instafail==0.4.*',
'pytest-isort==1.0.*',
'pytest-pylint==0.14.*',
'pytest-watch==4.2.*',
'pytest==4.*',
'yapf==0.30.*',
'pytest-mock==3.2.*',
]
},
setup_requires=[
'pytest-runner',
],
entry_points={'console_scripts': [
'reporter = reporter.main:main',
]},
)
|
from functools import partial
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from models.category_model import Category
from views.popup_view import PopupView
class ProductList:
    ''' Controls the product list view and its objects '''
def __init__(self, products, main_window):
self.LIMIT_ITEMS_COUNT_X = 2
self.LIMIT_ITEMS_COUNT_Y = 1
shape = QDesktopWidget().screenGeometry()
self.width = shape.width()
self.height = shape.height()
self.products = products
self.widget = self.createWidget()
self.main_window = main_window
def show(self):
self.widget.showFullScreen()
def hide(self):
self.widget.hide()
def handleProductPressed(self, product):
self.popup = PopupView(self.widget, product)
self.popup.showFullScreen()
def handle_back_button_pressed(self):
self.main_window.reload_data()
self.widget.close()
def createWidget(self):
widget = QWidget()
grid = QGridLayout(widget)
bg = QLabel(widget)
bg.setPixmap(QPixmap("assets/background.jpg"))
bg.resize(self.width, self.height)
bg.setAlignment(Qt.AlignCenter)
# Add a button
pos_x = 0
pos_y = 0
for i in self.products:
btn = QPushButton('{}\nR${:.2f}'.format(i.name, i.price))
btn.clicked.connect(partial(self.handleProductPressed, i))
btn.setSizePolicy(
QSizePolicy.Preferred,
QSizePolicy.Preferred)
btn.setStyleSheet(
"background-color: rgba(0, 0, 0, 0); border: 2px solid white; font-size: 18px; font-weight: bold; color: white")
            grid.addWidget(btn, pos_y, pos_x, 1, 1)
pos_x += 1
if pos_x > self.LIMIT_ITEMS_COUNT_X:
pos_x = 0
pos_y += 1
if pos_y >= self.LIMIT_ITEMS_COUNT_Y:
pass
button_back = QPushButton('Voltar')
button_back.clicked.connect(self.handle_back_button_pressed)
button_back.setSizePolicy(
QSizePolicy.Preferred,
QSizePolicy.Preferred)
button_back.setStyleSheet(
"background-color: rgba(0, 0, 0, 0); border: 2px solid white; font-size: 28px; font-weight: bold; color: white")
grid.addWidget(button_back, round(len(self.products) / self.LIMIT_ITEMS_COUNT_X), 0, 1, -1)
return widget
@staticmethod
def getProducts():
return Category.get_categories()
|
from authorization import authorization
from data_reading import read_data_from_api,read_token_file,read_from_local_data_file
from prettytable import PrettyTable
import datetime
table = PrettyTable()
def user_prompt():
"""
Description:
user_prompt is responsible for asking a user if they are
a student or a volunteer.
Parameter:
Takes no Parameters:
None:None
return:
returns None or dictionary type:
None:None
dict:dict
"""
user_type = input("Are you a student? Y/N: ").upper()
email = input("Please enter your email address: ")
if user_type.lower() == "y":
return {"user_type": "student", "user_email": email}
elif user_type.lower() =="n":
return {"user_type": "volunteer", "user_email": email}
else:
return None
def get_date():
"""
Description:
get_date is responsible for getting user input and verify if it is
a valid date.
Parameters:
Takes no Parameters:
None:None
return:
returns None or date of type String:
None:None
date:String
"""
date = input("Enter date (yyyy-mm-dd): ")
date_list = date.split("-",2)
if len(date_list) == 3:
year = date_list[0]
month = date_list[1]
day = date_list[2]
if (year.isdigit() and len(year) == 4) and (month.isdigit() and len(month) == 2) and (day.isdigit() and len(day) == 2):
return f"{year}-{month}-{day}"
else:
print("Wrong date")
return None
else:
print("Wrong date")
return None
def get_time():
"""
Description:
get_time is responsible for getting user input and verify if it is
a valid time.
Parameters:
Takes no Parameters:
None:None
return:
returns None or time of type String:
None:None
time:String
"""
time = input("Enter start time between 09:00-16:00 (hh:mm): ")
time_list = time.split(":")
if len(time_list) == 2:
hours = time_list[0]
minutes = time_list[1]
if (hours.isdigit() and int(hours) in range(9,16+1)) and (minutes.isdigit() and int(minutes) in range(60)):
return f"{hours}:{minutes}"
else:
print("Wrong time")
return None
else:
print("Wrong time")
return None
def get_datetime():
"""
Description:
get_datetime is responsible for verifying if a date is a current
or future date.
Parameters:
Takes no Parameters:
None:None
return:
returns None or start_time and end_time of type datetime:
None:None
start_time:datetime
end_time:datetime
"""
date = get_date()
if date != None:
time = get_time()
if time != None:
year = date.split("-")[0]
month = date.split("-")[1]
day = date.split("-")[2]
hours = time.split(":")[0]
minutes = time.split(":")[1]
current_date = str(datetime.datetime.now()).split()[0]
if date >= current_date:
start_time = datetime.datetime(int(year),int(month),int(day),int(hours),int(minutes),00).isoformat()
end_time = (datetime.datetime(int(year),int(month),int(day),int(hours),int(minutes),00) + datetime.timedelta(minutes=30)).isoformat()
return start_time,end_time
else:
print("Provide new date")
return None
else:
print("Provide correct time")
return None
else:
print("Provide correct date")
return None
def start_system():
"""
Description:
start_system is responsible for the authorization of the client.
Parameters:
Takes no parameters:
None:None
return:
returns None type:
None:None
"""
if authorization.client_authorization():
print("Login succesful.\nWelcome To WethinkCode Code Clinic Booking System\n")
def exit_system():
"""
Description:
exit_system is responsible for revoking and deleting an access token.
Parameters:
Takes no parameters:
None:None
return:
returns None type:
None:None
"""
if authorization.revoke_tokens() == True and authorization.delete_token() == True:
print("You have logged out of the WeThinkCode Code Clinic Booking System.")
def help():
"""
Description:
help is used to print the allowable commands when the user enters "help".
Parameters:
Takes no parameters:
None:None
return:
returns None type:
None:None
"""
print("""Code Clinic Booking System 0.0.1
USAGE:
code-clinic [FLAGS] [SUBCOMMAND]
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
SUBCOMMANDS:
login Logs user into the code clinic booking system
logout Logs user out the code clinic booking system
create_slot Allows a volunteer to create a slot
book [slot name] Allows a student to book a slot
read_event Allows the user to view slots they are confirmed for
check_slot [number of days being viewed] Checks the availability of slots
cancel_slot [slot name] Allows a volunteer to cancel slot
cancel_booking [slot name] Cancels booking made by student
""")
def check_slot(days=7):
"""
Description:
check_slot is responsible for checking available slots for the next 7 days.
Parameters:
Takes one parameter of type String:
days:String
return:
returns None type:
None:None
"""
status = read_data_from_api.read_from_api(days)
if status == True:
read_api_data = read_from_local_data_file.read_from_file()
print("-"*25, "LIST OF AVAILABLE SLOTS", "-"*25)
print("\n")
for index in range(len(read_api_data["items"])):
event = read_api_data["items"][index]
print(f"""Summary: {event['summary']}
description: {event['description']}
volunteer: {event['creator']['email']}
location: {event['location']}
start: {event['start']['dateTime']}
end: {event['end']['dateTime']}""")
print("-"*75)
|
import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="aws_cdk_python_dev_guide",
version="0.0.1",
description="A guide for AWS CDK development using Python",
long_description=long_description,
long_description_content_type="text/markdown",
author="therightstuff",
license='MIT',
package_dir={"": "aws_cdk_python_dev_guide"},
packages=setuptools.find_packages(where="aws_cdk_python_dev_guide"),
install_requires=[
"aws-cdk.assets==1.86.0",
"aws-cdk.aws-apigateway==1.86.0",
"aws-cdk.aws-applicationautoscaling==1.86.0",
"aws-cdk.aws-autoscaling-common==1.86.0",
"aws-cdk.aws-certificatemanager==1.86.0",
"aws-cdk.aws-cloudformation==1.86.0",
"aws-cdk.aws-cloudwatch==1.86.0",
"aws-cdk.aws-codeguruprofiler==1.86.0",
"aws-cdk.aws-ec2==1.86.0",
"aws-cdk.aws-ecr==1.86.0",
"aws-cdk.aws-ecr-assets==1.86.0",
"aws-cdk.aws-efs==1.86.0",
"aws-cdk.aws-elasticloadbalancingv2==1.86.0",
"aws-cdk.aws-events==1.86.0",
"aws-cdk.aws-iam==1.86.0",
"aws-cdk.aws-kms==1.86.0",
"aws-cdk.aws-lambda==1.86.0",
"aws-cdk.aws-logs==1.86.0",
"aws-cdk.aws-route53==1.86.0",
"aws-cdk.aws-s3==1.86.0",
"aws-cdk.aws-s3-assets==1.86.0",
"aws-cdk.aws-sns==1.86.0",
"aws-cdk.aws-sqs==1.86.0",
"aws-cdk.aws-ssm==1.86.0",
"aws-cdk.cloud-assembly-schema==1.86.0",
"aws-cdk.core==1.86.0",
"aws-cdk.custom-resources==1.86.0",
"aws-cdk.cx-api==1.86.0",
"aws-cdk.region-info==1.86.0",
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
|
from identixone.api.sources.v1.sources import Sources
__all__ = ["Sources"]
|
import numpy as np
from numpy.fft import fft2, ifft2, ifftshift, fftshift
def shear_grid_mask(shape, acceleration_rate, sample_low_freq=False,
centred=False, sample_n=4, test=False):
'''
    Creates an undersampling mask which samples on a shear grid
Parameters
----------
shape: (nt, nx, ny)
acceleration_rate: int
Returns
-------
array
'''
Nt, Nx, Ny = shape
if test:
start = 0
else:
start = np.random.randint(0, acceleration_rate)
mask = np.zeros((Nt, Nx))
for t in range(Nt):
mask[t, (start+t)%acceleration_rate::acceleration_rate] = 1
xc = Nx // 2
xl = sample_n // 2
if sample_low_freq and centred:
xh = xl
if sample_n % 2 == 0:
xh += 1
mask[:, xc - xl:xc + xh+1] = 1
elif sample_low_freq:
xh = xl
if sample_n % 2 == 1:
xh -= 1
if xl > 0:
mask[:, :xl] = 1
if xh > 0:
mask[:, -xh:] = 1
mask_rep = np.repeat(mask[..., np.newaxis], Ny, axis=-1)
return mask_rep
def undersample(x, mask, centred=False, norm='ortho', noise=0):
'''
Undersample x. FFT2 will be applied to the last 2 axis
Parameters
----------
x: array_like
data
mask: array_like
undersampling mask in fourier domain
norm: 'ortho' or None
if 'ortho', performs unitary transform, otherwise normal dft
    noise: float
        simulates acquisition noise as complex additive white Gaussian noise,
        specified as a fraction of the peak signal
Returns
-------
xu: array_like
undersampled image in image domain. Note that it is complex valued
x_fu: array_like
undersampled data in k-space
'''
assert x.shape == mask.shape
# zero mean complex Gaussian noise
noise_power = noise
nz = np.sqrt(.5)*(np.random.normal(0, 1, x.shape) + 1j * np.random.normal(0, 1, x.shape))
nz = nz * np.sqrt(noise_power)
if norm == 'ortho':
# multiplicative factor
nz = nz * np.sqrt(np.prod(mask.shape[-2:]))
else:
nz = nz * np.prod(mask.shape[-2:])
if centred:
axes = (-2, -1)
x_f = fftshift(fft2(ifftshift(x, axes=axes), norm=norm), axes=axes)
x_fu = mask * (x_f + nz)
x_u = fftshift(ifft2(ifftshift(x_fu, axes=axes), norm=norm), axes=axes)
return x_u, x_fu
else:
x_f = fft2(x, norm=norm)
x_fu = mask * (x_f + nz)
x_u = ifft2(x_fu, norm=norm)
return x_u, x_fu
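# Example run on synthetic data: build a shear-grid mask for a small dynamic
# volume and retrospectively undersample random complex "images" with it.
if __name__ == '__main__':
    x = np.random.randn(8, 32, 32) + 1j * np.random.randn(8, 32, 32)
    mask = shear_grid_mask(x.shape, acceleration_rate=4, sample_low_freq=True,
                           centred=True, sample_n=4)
    x_u, x_fu = undersample(x, mask, centred=True, noise=0)
    print(mask.mean(), x_u.shape, x_fu.shape)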
|
import numpy as np
class Statistics:
def __init__(self, keep_all=False):
self.keep_all = keep_all
self.data = []
self.n = 0
self.avg = 0.0
def add(self, x):
if self.keep_all:
self.data += x
else:
self.avg = self.avg * (self.n / float(self.n + 1)) + x * (1. / float(self.n + 1))
self.n += 1
def mean(self):
if self.keep_all:
return np.mean(self.data)
else:
return self.avg
@staticmethod
def get_stats(k, keep_all=False):
return [Statistics(keep_all) for _ in range(k)]
def np(self):
return np.array(self.data)
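# Running-average mode (the default): the mean is updated incrementally
# without storing the individual values.
if __name__ == '__main__':
    stat = Statistics()
    for value in [1.0, 2.0, 3.0]:
        stat.add(value)
    print(stat.mean())  # 2.0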
|
"""
The implementation code for the command-send GDS CLI commands
"""
import difflib
from typing import Iterable, List
from fprime.common.models.serialize.type_exceptions import NotInitializedException
from fprime_gds.common.data_types.cmd_data import CommandArgumentsException
from fprime_gds.common.gds_cli.base_commands import QueryHistoryCommand
import fprime_gds.common.gds_cli.misc_utils as misc_utils
import fprime_gds.common.gds_cli.test_api_utils as test_api_utils
from fprime_gds.common.pipeline.dictionaries import Dictionaries
from fprime_gds.common.templates.cmd_template import CmdTemplate
from fprime_gds.common.testing_fw import predicates
from fprime_gds.common.testing_fw.api import IntegrationTestAPI
class CommandSendCommand(QueryHistoryCommand):
"""
The implementation for sending a command via the GDS to the spacecraft
"""
@staticmethod
def get_closest_commands(
project_dictionary: Dictionaries, command_name: str, num: int = 3
) -> List[str]:
"""
Searches for the closest matching known command(s) to the given command
name.
:param project_dictionary: The dictionary object for this project
containing the item type definitions
:param command_name: The full string name of the command to search for
:param num: The maximum number of near-matches to return
:return: A list of the closest matching commands (potentially empty)
"""
known_commands = project_dictionary.command_name.keys()
closest_matches = difflib.get_close_matches(command_name, known_commands, n=num)
return closest_matches
# TODO: Make this a method on one of the pipeline classes instead?
@staticmethod
def get_command_template(
project_dictionary: Dictionaries, command_name: str
) -> CmdTemplate:
"""
Retrieves the command template for the given command name
:param project_dictionary: The dictionary object for this project
containing the item type definitions
:param command_name: The full string name of the command to return a
template for
:return: The CmdTemplate object for the given command
"""
return project_dictionary.command_name[command_name]
@staticmethod
def get_command_help_message(
project_dictionary: Dictionaries, command_name: str
) -> str:
"""
Returns a string showing a help message for the given GDS command.
:param project_dictionary: The dictionary object for this project
containing the item type definitions
:param command_name: The full string name of the command to return a
help message for
:return: A help string for the command
"""
command_template = CommandSendCommand.get_command_template(
project_dictionary, command_name
)
# TODO: Refactor CommandsCommand's method into a common class, since
# this is technically a private method?
return misc_utils.get_cmd_template_string(command_template)
@classmethod
def _get_item_list(
cls,
project_dictionary: Dictionaries,
filter_predicate: predicates.predicate,
json: bool = False,
) -> Iterable[CmdTemplate]:
"""
Gets a list of available commands in the system and return them in an
ID-sorted list.
:param project_dictionary: The dictionary object for the project
containing the command definitions
:param filter_predicate: Test API predicate used to filter shown
channels
:param json: Whether to print out each item in JSON format or not
"""
# NOTE: Trying to create a blank CmdData causes errors, so currently
# just using templates (i.e. this function does nothing)
create_empty_command = lambda cmd_template: cmd_template
command_list = test_api_utils.get_item_list(
item_dictionary=project_dictionary.command_id,
search_filter=filter_predicate,
template_to_data=create_empty_command,
)
return command_list
@classmethod
def _get_upcoming_item(
cls, api, filter_predicate, min_start_time="",
):
"""
TODO: Doesn't use _get_upcoming_item; sign that this should not use QueryHistory as a base class?
"""
pass
@classmethod
def _get_item_string(cls, item: CmdTemplate, json: bool = False,) -> str:
"""
Converts the given command template into a human-readable string.
:param item: The CmdTemplate to convert to a string
:param json: Whether or not to return a JSON representation of "temp"
:return: A readable string version of "item"
"""
return misc_utils.get_cmd_template_string(item, json)
# TODO: Cut down on the number of arguments here?
@classmethod
def handle_arguments(
cls,
dictionary: str,
ip_address: str,
port: int,
command_name: str,
arguments: List[str],
list: bool,
ids: Iterable[int],
components: Iterable[str],
search: str,
json: bool,
*args,
**kwargs
):
"""
Handle the command-send arguments to connect to the Test API correctly,
then send the command via the Test API.
For more details on these arguments, see the command-send definition at:
Gds/src/fprime_gds/executables/fprime_cli.py
"""
search_filter = cls._get_search_filter(ids, components, search, json)
if list:
cls._log(cls._list_all_possible_items(dictionary, search_filter, json))
return
# TODO: Make this api setup part of a decorator somehow, since it
# recurs in several places?
# ======================================================================
pipeline, api = test_api_utils.initialize_test_api(
dictionary, server_ip=ip_address, server_port=port
)
# ======================================================================
try:
api.send_command(command_name, arguments)
except KeyError:
cls._log("'%s' is not a known command" % (command_name))
close_matches = CommandSendCommand.get_closest_commands(
pipeline.dictionaries, command_name
)
if close_matches:
cls._log("Similar known commands: {}".format(close_matches))
except NotInitializedException:
temp = CommandSendCommand.get_command_template(
pipeline.dictionaries, command_name
)
cls._log(
"'%s' requires %d arguments (%d given)"
% (command_name, len(temp.get_args()), len(arguments))
)
cls._log(cls.get_command_help_message(pipeline.dictionaries, command_name))
except CommandArgumentsException as err:
cls._log("Invalid arguments given; %s" % (str(err)))
cls._log(cls.get_command_help_message(pipeline.dictionaries, command_name))
# ======================================================================
pipeline.disconnect()
api.teardown()
# ======================================================================
|
from .siamrpn import SiamRPN
def get_tracker_class():
return SiamRPN
|
"""
Calculate the middle points, areas and best predictors of the polygons and the relative areas of the best predictors
"""
from shapely.geometry.polygon import Polygon
def get_predictor_cost(x, y, rho, sens, spec, cov):
"""
Calculate the predictor's cost on a point (x, y) based on the rho and its sensitivity, specificity and coverage
"""
return x * ((rho * cov * (1 - sens)) + cov - 1) + y * (((1 - rho) * cov * (1 - spec)) + cov - 1) + 1 - cov
def get_polygon_best_predictor(rho, predictors, polygons):
"""
Calculate the predictor with the best cost in a polygon
"""
polygon_best_predictor = []
for polygon_id, polygon in enumerate(polygons):
centroid = Polygon(polygon).centroid
cost_predictors = {predictor: get_predictor_cost(centroid.x, centroid.y, rho, *predictors[predictor])
for predictor in predictors}
best_predictor = min(cost_predictors, key=cost_predictors.get)
polygon_best_predictor.append(best_predictor)
return polygon_best_predictor
def get_best_predictor_area(polygons, polygon_best_predictor):
"""
Calculate the total area of the best predictors in the triangle
"""
best_predictor_areas = dict.fromkeys(polygon_best_predictor, 0.0)
for polygon_id, polygon in enumerate(polygons):
best_predictor_areas[polygon_best_predictor[polygon_id]] += Polygon(polygon).area
return best_predictor_areas
def get_predictor_area(best_predictor_areas, predictors):
"""
Calculate the area and relative areas of predictors
"""
predictor_areas = {predictor: best_predictor_areas.get(predictor, 0.0) for predictor in predictors}
predictor_relative_areas = {predictor: predictor_areas[predictor] / 0.5 for predictor in predictors}
return predictor_areas, predictor_relative_areas
def get_polygons_data(rho, predictors, polygons):
"""
Calculate the best predictor in a polygon and best predictors' areas and relative areas
"""
polygon_best_predictor = get_polygon_best_predictor(rho, predictors, polygons)
best_predictor_areas = get_best_predictor_area(polygons, polygon_best_predictor)
predictor_areas, predictor_relative_areas = get_predictor_area(best_predictor_areas, predictors)
return predictor_areas, predictor_relative_areas
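# Toy example with made-up inputs: two triangles tiling the unit right
# triangle and two hypothetical predictors given as (sensitivity,
# specificity, coverage) tuples.
if __name__ == '__main__':
    predictors = {'A': (0.9, 0.7, 1.0), 'B': (0.6, 0.95, 1.0)}
    polygons = [[(0, 0), (1, 0), (0.5, 0.5)], [(0, 0), (0.5, 0.5), (0, 1)]]
    areas, relative_areas = get_polygons_data(rho=0.5, predictors=predictors,
                                              polygons=polygons)
    print(areas, relative_areas)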
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 by Clemens Rabe <clemens.rabe@clemensrabe.de>
# All rights reserved.
# This file is part of gitcache (https://github.com/seeraven/gitcache)
# and is released under the "BSD 3-Clause License". Please see the LICENSE file
# that is included as part of this package.
#
"""Unit tests of the git_cache.command_execution module testing simple_call_command()."""
# -----------------------------------------------------------------------------
# Module Import
# -----------------------------------------------------------------------------
import io
import platform
import sys
from unittest import TestCase
from git_cache.command_execution import simple_call_command
# -----------------------------------------------------------------------------
# Test Class
# -----------------------------------------------------------------------------
class GitCacheSimpleCallCommandTest(TestCase):
"""Test the :func:`git_cache.command_execution.simple_call_command` function."""
def setUp(self):
"""Set up the test case."""
# Nose does not provide a sys.stdout.buffer member
if not hasattr(sys.stdout, 'buffer'):
sys.stdout.buffer = io.BytesIO()
self.on_windows = platform.system().lower().startswith('win')
def test_return(self):
"""git_cache.command_execution.simple_call_command(): Get return code."""
if self.on_windows:
cmd_true = ['cmd.exe', '/C', 'exit 0']
cmd_false = ['cmd.exe', '/C', 'exit 1']
else:
cmd_true = ['true']
cmd_false = ['false']
self.assertEqual(0, simple_call_command(cmd_true))
self.assertEqual(1, simple_call_command(cmd_false))
def test_shell_return(self):
"""git_cache.command_execution.simple_call_command(): Get return code using shell."""
self.assertEqual(0, simple_call_command('exit 0', shell=True))
self.assertEqual(1, simple_call_command('exit 1', shell=True))
def test_command_not_found(self):
"""git_cache.command_execution.simple_call_command(): Command not found."""
self.assertEqual(127, simple_call_command('cmd_doesnt_exist'))
def test_shell_command_not_found(self):
"""git_cache.command_execution.simple_call_command(): Command not found using shell."""
self.assertNotEqual(0, simple_call_command('cmd_doesnt_exist', shell=True))
# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def predict_final(prefix=None,
assignment=None,
tutorial=None,
midterm=None,
takehome=None,
final=None):
""" Predictor for Final from model/59db76eb9b356c2c97004804
Predictive model by BigML - Machine Learning Made Easy
"""
if (midterm is None):
return {"prediction":0.38343}
if (midterm > 77.08667):
if (takehome is None):
return {"prediction":20.38342}
if (takehome > 106.945):
return {"prediction":3.43945}
if (takehome <= 106.945):
if (tutorial is None):
return {"prediction":22.41332}
if (tutorial > 78.665):
return {"prediction":23.88547}
if (tutorial <= 78.665):
return {"prediction":8.56289}
if (midterm <= 77.08667):
if (midterm > 48.75):
if (takehome is None):
return {"prediction":-4.5295}
if (takehome > 53.795):
if (midterm > 73.44):
if (takehome > 73.795):
return {"prediction":-13.82749}
if (takehome <= 73.795):
return {"prediction":-3.41771}
if (midterm <= 73.44):
if (assignment is None):
return {"prediction":-0.71945}
if (assignment > 82.74):
if (tutorial is None):
return {"prediction":-3.97172}
if (tutorial > 103.945):
if (tutorial > 104.835):
return {"prediction":-0.08074}
if (tutorial <= 104.835):
return {"prediction":1.25414}
if (tutorial <= 103.945):
if (midterm > 62.5):
if (midterm > 65.31):
return {"prediction":-4.37529}
if (midterm <= 65.31):
return {"prediction":4.4972}
if (midterm <= 62.5):
if (tutorial > 95.71):
return {"prediction":-14.0932}
if (tutorial <= 95.71):
return {"prediction":-1.74541}
if (assignment <= 82.74):
if (tutorial is None):
return {"prediction":7.50115}
if (tutorial > 96.79):
return {"prediction":-0.71028}
if (tutorial <= 96.79):
return {"prediction":10.35668}
if (takehome <= 53.795):
return {"prediction":-13.76724}
if (midterm <= 48.75):
if (takehome is None):
return {"prediction":-15.51536}
if (takehome > 58.89):
return {"prediction":-18.93268}
if (takehome <= 58.89):
if (tutorial is None):
return {"prediction":-5.65621}
if (tutorial > 77.095):
return {"prediction":-7.01337}
if (tutorial <= 77.095):
return {"prediction":-0.79237}
def predict(prefix=None,
assignment=None,
tutorial=None,
midterm=None,
takehome=None,
final=None):
prediction = predict_final(prefix=prefix, assignment=assignment, tutorial=tutorial, midterm=midterm, takehome=takehome, final=final)
prediction.update({"weight": 0.09621})
return prediction
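# Example call with made-up grades: the model returns a prediction for the
# final mark together with this tree's ensemble weight.
if __name__ == "__main__":
    print(predict(assignment=85, tutorial=90, midterm=80, takehome=100))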
|
import numpy as np
np.random.seed(0)
def sigmoid(x):
out = 1 / (1+np.exp(-x))
return out
def relu(x):
out = x
out[out < -1] = -1
    out[(out > -1) & (out < 0)] = 0
return out
def tanh(x):
out = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
return out
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
class Network(object):
def __init__(self, shape, weights_path=None):
self.shape = shape
self.weights = []
self.bias = []
for i in range(len(self.shape)-1):
self.weights.append(np.random.uniform(-1, 1, (self.shape[i], self.shape[i+1])))
for i in range(len(self.shape)-1):
self.bias.append(np.random.uniform(-1, 1, self.shape[i+1]))
if weights_path:
temp = np.genfromtxt(weights_path, delimiter=" ")
self._import(temp)
self.layers = []
for i in range(len(self.shape)):
self.layers.append(np.zeros(self.shape[i]))
def forward_propagation(self, inputs):
self.layers[0] = inputs
for i in range(len(self.shape)-1):
self.layers[i+1] = np.dot(self.layers[i], self.weights[i])
self.layers[i+1] = self.layers[i+1] + (self.bias[i])
self.layers[i+1] = sigmoid(self.layers[i+1])
return self.layers[len(self.layers)-1]
def export(self):
vect = []
for layer in self.weights:
for k in layer:
for j in k:
vect.append(j)
for i in self.bias:
for k in i:
vect.append(k)
return np.array(vect)
def _import(self, vect):
s = 0
for layer in range(len(self.shape)-1):
for k in range(len(self.weights[layer])):
for j in range(len(self.weights[layer][k])):
self.weights[layer][k][j] = vect[s]
s += 1
for i in range(len(self.shape)-1):
for k in range(len(self.bias[i])):
self.bias[i][k] = vect[s]
s += 1
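# Forward pass through a tiny random network (illustrative shapes only), plus
# a round-trip of the flattened parameters through export()/_import().
if __name__ == "__main__":
    net = Network([2, 3, 1])
    print(net.forward_propagation(np.array([0.5, -0.2])))
    params = net.export()
    net._import(params)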
|
routes = [
('Binance Futures', 'BTC-USDT', '1m', 'OttBands1min', 'hY?9')
]
extra_candles = []
|
# -*- coding:utf-8 -*-
__author__ = 'shshen'
import os
import random
import csv
import logging
import numpy as np
def logger_fn(name, input_file, level=logging.INFO):
tf_logger = logging.getLogger(name)
tf_logger.setLevel(level)
log_dir = os.path.dirname(input_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(input_file, mode='w')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
tf_logger.addHandler(fh)
return tf_logger
def read_data_from_csv_file(fileName):
rows = []
max_skill_num = 0
max_num_problems = 116
with open(fileName, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
rows.append(row)
'''
for indx in range(0, len(rows)):
if (indx + 1 )% 3 == 0:
rand = random.randint(0, len(rows[indx]) - 1)
if int(rows[indx][rand]) == 1:
rows[indx][rand] = 0
if int(rows[indx][rand]) == 0:
rows[indx][rand] = 1
'''
index = 0
print ("the number of rows is " + str(len(rows)))
tuple_rows = []
#turn list to tuple
while(index < len(rows)-1):
problems_num = int(rows[index][0])
tmp_max_skill = max(map(int, rows[index+1]))
'''
cc = []
for item in rows[index+2]:
cc.append(int(item))
a_r = round(sum(cc) / problems_num, 2)
if a_r == 0.0 or a_r == 1.0:
index += 3
continue
'''
if(tmp_max_skill > max_skill_num):
max_skill_num = tmp_max_skill
if(problems_num <= 2):
index += 3
else:
if problems_num > max_num_problems:
count = problems_num // max_num_problems
iii = 0
while(iii <= count):
if iii != count:
tup = (max_num_problems, rows[index+1][iii * max_num_problems : (iii+1)*max_num_problems], rows[index+2][iii * max_num_problems : (iii+1)*max_num_problems])
elif problems_num - iii*max_num_problems > 2:
tup = (problems_num - iii*max_num_problems, rows[index+1][iii * max_num_problems : (iii+1)*max_num_problems], rows[index+2][iii * max_num_problems : (iii+1)*max_num_problems])
else:
break
tuple_rows.append(tup)
iii += 1
index += 3
else:
tup = (problems_num, rows[index+1], rows[index+2])
tuple_rows.append(tup)
index += 3
#shuffle the tuple
random.shuffle(tuple_rows)
print ("The number of students is ", len(tuple_rows))
print ("Finish reading data")
return tuple_rows, max_num_problems, max_skill_num+1
def read_test_data_from_csv_file(fileName):
rows = []
max_skill_num = 0
max_num_problems = 116
with open(fileName, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
rows.append(row)
'''
for indx in range(0, len(rows)):
if (indx + 1 )% 3 == 0:
rand = random.randint(0, len(rows[indx]) - 1)
if int(rows[indx][rand]) == 1:
rows[indx][rand] = 0
if int(rows[indx][rand]) == 0:
rows[indx][rand] = 1
'''
index = 0
print ("the number of rows is " + str(len(rows)))
tuple_rows = []
#turn list to tuple
while(index < len(rows)-1):
problems_num = int(rows[index][0])
tmp_max_skill = max(map(int, rows[index+1]))
'''
cc = []
for item in rows[index+2]:
cc.append(int(item))
a_r = round(sum(cc) / problems_num, 2)
if a_r == 0.0 or a_r == 1.0:
index += 3
continue
'''
if(tmp_max_skill > max_skill_num):
max_skill_num = tmp_max_skill
if(problems_num <= 2):
index += 3
else:
if problems_num > max_num_problems:
count = problems_num // max_num_problems
iii = 0
while(iii <= count):
if iii != count:
tup = (max_num_problems, rows[index+1][iii * max_num_problems : (iii+1)*max_num_problems], rows[index+2][iii * max_num_problems : (iii+1)*max_num_problems])
elif problems_num - iii*max_num_problems > 2:
tup = (problems_num - iii*max_num_problems, rows[index+1][iii * max_num_problems : (iii+1)*max_num_problems], rows[index+2][iii * max_num_problems : (iii+1)*max_num_problems])
else:
break
tuple_rows.append(tup)
iii += 1
index += 3
else:
tup = (problems_num, rows[index+1], rows[index+2])
tuple_rows.append(tup)
index += 3
#shuffle the tuple
# random.shuffle(tuple_rows)
print ("The number of students is ", len(tuple_rows))
print ("Finish reading data")
return tuple_rows, max_num_problems, max_skill_num+1
|
""" Copyright (c) 2019 Lumerical Inc. """
import sys
sys.path.append(".")
import os
from qatools import *
from lumopt.utilities.simulation import Simulation
from lumopt.utilities.base_script import BaseScript
class TestFDTDBaseScript(TestCase):
"""
Unit test for BaseScript class. It verifies that the object is able to run an *.lsf script, a *.fsp project file or a plain script in a string.
"""
file_dir = os.path.abspath(os.path.dirname(__file__))
def setUp(self):
self.sim = Simulation(workingDir = self.file_dir, use_var_fdtd = False, hide_fdtd_cad = True)
def test_eval_project_file(self):
my_project_file = os.path.join(self.file_dir,'base_fdtd_script_test.fsp')
base_script_obj = BaseScript(my_project_file)
base_script_obj(self.sim.fdtd)
self.assertTrue(self.sim.fdtd.getnamednumber('FDTD') == 1)
self.assertTrue(self.sim.fdtd.getnamednumber('source') == 1)
self.assertTrue(self.sim.fdtd.getnamednumber('polygon') == 1)
def test_eval_python_script(self):
my_fun = lambda fdtd_handle: fdtd_handle.addfdtd()
base_script_obj = BaseScript(my_fun)
base_script_obj.eval(self.sim.fdtd)
self.assertTrue(self.sim.fdtd.getnamednumber('FDTD') == 1)
def test_eval_script_file(self):
my_script_file = os.path.join(self.file_dir,'base_fdtd_script_test.lsf')
base_script_obj = BaseScript(my_script_file)
base_script_obj.eval(self.sim.fdtd)
self.assertTrue(self.sim.fdtd.getnamednumber('FDTD') == 1)
def test_eval_script(self):
my_script = "load('base_fdtd_script_test.fsp');"
base_script_obj = BaseScript(my_script)
base_script_obj.eval(self.sim.fdtd)
self.assertTrue(self.sim.fdtd.getnamednumber('FDTD') == 1)
self.assertTrue(self.sim.fdtd.getnamednumber('source') == 1)
self.assertTrue(self.sim.fdtd.getnamednumber('polygon') == 1)
if __name__ == "__main__":
run([__file__])
|
from bs4 import BeautifulSoup
import requests
from summarize import getSummary
import datetime
def parseTime(time):
return time
headers = {
"Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:80.0) Gecko/20100101 Firefox/80.0",
}
listSite = "https://timesofindia.indiatimes.com"
baseURL = "https://timesofindia.indiatimes.com"
page = requests.get(listSite, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
listN = soup.find(class_ = "list9").find_all('li')
topNews = []
for site in listN :
print(site)
try:
heading = site.find('a').text
heading = heading.replace('\xa0', ' ').encode('utf-8')
link = baseURL + site.a["href"]
news = {
"href": link,
"headline": heading,
# "time": time
}
topNews.append(news)
except:
print("Non list item ")
print(topNews)
|
import MicroRegEx
regex = MicroRegEx.compile("(a|b)cd*e?")
result = regex.match("abcde")
print(result)
result = regex.match("acde")
print(result)
|
import xlwt
from django.http import HttpResponse
from .models import Instansi
from .resources import InstansiResource
def export_instansi(request):
instansi = InstansiResource()
dataset = instansi.export()
response = HttpResponse(dataset.xls, content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename=Data-Instansi-PKL-2021.xls'
return response
def export_instansi_rpl(request):
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="Daftar-Instansi-RPL.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('Daftar Instansi RPL')
# sheet header, first row
row_num = 0
font_style = xlwt.XFStyle()
font_style.font.bold = True
columns = ['Nama', 'Alamat', 'Pimpinan', 'Kontak', 'Email', 'Slot']
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num], font_style)
# sheet body, remaining rows
font_style = xlwt.XFStyle()
rows = Instansi.objects.filter(grup='RPL').values_list('nama', 'alamat', 'pimpinan', 'kontak', 'email', 'limit')
for row in rows:
row_num = row_num + 1
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
wb.save(response)
return response
def export_instansi_tkj(request):
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="Daftar-Instansi-TKJ.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('Daftar Instansi TKJ')
# sheet header, first row
row_num = 0
font_style = xlwt.XFStyle()
font_style.font.bold = True
columns = ['Nama', 'Alamat', 'Pimpinan', 'Kontak', 'Email', 'Slot']
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num], font_style)
# sheet body, remaining rows
font_style = xlwt.XFStyle()
rows = Instansi.objects.filter(grup='TKJ').values_list('nama', 'alamat', 'pimpinan', 'kontak', 'email', 'limit')
for row in rows:
row_num = row_num + 1
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
wb.save(response)
return response
|
# -*- coding:utf8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 17/08/2017
#
# This file is part of TensorArtist.
from .adv import *
from .env import *
|
#!/usr/bin/env python
import colorsys
import math
import time
import unicornhathd
print("""Ubercorn rainbow 2x1
An example of how to use a 2-wide by 1-tall pair of Ubercorn matrices.
Press Ctrl+C to exit!
""")
unicornhathd.brightness(0.6)
# Enable addressing for Ubercorn matrices
unicornhathd.enable_addressing()
# Set up buffer shape to be 32 wide and 16 tall
unicornhathd.setup_buffer(32, 16)
# Set up display 0 on left, and display 1 on right
unicornhathd.setup_display(0, 0, 0, 0)
unicornhathd.setup_display(1, 16, 0, 0)
step = 0
try:
while True:
step += 1
for x in range(0, 32):
for y in range(0, 16):
dx = 7
dy = 7
dx = (math.sin(step / 20.0) * 15.0) + 7.0
dy = (math.cos(step / 15.0) * 15.0) + 7.0
sc = (math.cos(step / 10.0) * 10.0) + 16.0
h = math.sqrt(math.pow(x - dx, 2) + math.pow(y - dy, 2)) / sc
r, g, b = colorsys.hsv_to_rgb(h, 1, 1)
r *= 255.0
g *= 255.0
b *= 255.0
unicornhathd.set_pixel(x, y, r, g, b)
unicornhathd.show()
time.sleep(1.0 / 60)
except KeyboardInterrupt:
unicornhathd.off()
|
# The MIT License (MIT)
#
# Copyright (c) 2017-2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import re
import argschema from './argschema'
import semver from './semver'
import manifest from './manifest'
spec = '[[@<scope>/]<name>[@<version>]][/<module>][:<member>]'
regex = re.compile(r'''^
(?:
(?: @(?P<scope> [A-z0-9\.\-_]+)/)?
(?P<name> [A-z0-9\.\-_]+)
(?: @(?P<version> [^/:]*))? # Version is actually a semver.Selector
)?
(?: /(?P<module> [A-z0-9\.\-_]+))?
(?: :(?P<member> [A-z0-9\.\-_]+))?
$''', re.X)
package_regex = re.compile(r'^(?:@([A-z0-9\.\-_]+)/)?([A-z0-9\.\-_]+)$')
class Ref(object):
"""
Represents a the contents of a reference string. Note that the #version
member of a #PackageRef is always a #semver.Selector, if it is set.
"""
def __init__(self, package, version, module, member):
argschema.validate('package', package, {'type': [None, Package]})
argschema.validate('version', version, {'type': [None, str, semver.Selector]})
argschema.validate('module', module, {'type': [None, str]})
argschema.validate('member', member, {'type': [None, str]})
if not package and version:
raise ValueError('can not specify version without package name')
if isinstance(version, str):
version = semver.Selector(version)
self.package = package
self.version = version
self.module = module
self.member = member
def __bool__(self):
return any((self.package, self.version, self.module, self.member))
__nonzero__ = __bool__
def __str__(self):
package = self.package
if package:
result = str(package)
if self.version:
result += '@' + str(self.version)
else:
if self.version:
raise ValueError('version can not be specified without a package')
result = ''
if self.module:
result += '/' + self.module
if self.member:
result += ':' + self.member
return result
def __unicode__(self):
return unicode(str(self))
def __repr__(self):
return '<Ref "{}">'.format(self)
def __eq__(self, other):
if isinstance(other, Ref):
return (self.package, self.version, self.module, self.member) == \
(other.package, other.version, other.module, other.member)
return False
class Package(object):
"""
Represents a package identifier.
"""
def __init__(self, scope, name):
if name in ('.', '..'):
raise ValueError('invalid package name: {!r}'.format(name))
if not name and scope:
raise ValueError('package name can not consist of only a scope')
self.scope = scope
self.name = name
def __str__(self):
if self.scope:
return '@{}/{}'.format(self.scope, self.name)
return self.name
def __unicode__(self):
return unicode(str(self))
def __iter__(self):
yield self.scope
yield self.name
def __eq__(self, other):
if isinstance(other, Package):
return (self.scope, self.name) == (other.scope, other.name)
def parse(s):
"""
Parse a reference string and returns a #Ref object. If the reference string
is invalid, a #ValueError is raised.
"""
m = regex.match(s)
if not m:
raise ValueError('invalid refstring: "{}"'.format(s))
scope, name, version, module, member = m.groups()
package = Package(scope, name) if (scope or name) else None
try:
return Ref(package, version, module, member) # can be raised for the version selector
except ValueError as exc:
raise ValueError('invalid refstring: "{}"'.format(exc))
def parse_package(s):
"""
Parse only a package name of the format `[@<scope>/]<name>`. Returns a
tuple of (scope, name).
"""
m = package_regex.match(s)
if not m:
raise ValueError('invalid package name: {!r}'.format(s))
return Package(*m.groups())
def join(package=None, version=None, module=None, member=None):
if package is not None:
package = parse_package(str(package))
return str(Ref(package, version, module, member))
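# Illustrative usage (added for clarity; not part of the original module).
# Only behaviour defined in this file is exercised, so the version component
# (whose string form depends on semver.Selector) is deliberately omitted.
#
# >>> ref = parse('@myscope/mypkg/utils:helper')
# >>> str(ref.package), ref.module, ref.member
# ('@myscope/mypkg', 'utils', 'helper')
# >>> join('@myscope/mypkg', module='utils', member='helper')
# '@myscope/mypkg/utils:helper'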
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Opt transformations
# Test 2-opt
# Add a quality ratio to stop the 2-opt optimization
# 3-opt
# 4-opt
# k-opt
##
## 2-opt
def swap2opt(path, i, j):
new_path = path[i:j]
new_path.reverse()
return path[:i]+new_path+path[j:]
def optimization2opt(graph, path, n):
    """ Search for a better path using 2-opt swaps """
    best_distance = getPathLength(graph, path)
    best_path = path
    for i in range(1, n):
        for j in range(i, n):
            new_path = swap2opt(path, i, j)
            new_distance = getPathLength(graph, new_path)
            if new_distance < best_distance:
                # restart the search from the improved path and track its length,
                # so later swaps are compared against the best tour found so far
                best_path = optimization2opt(graph, new_path, n)
                best_distance = getPathLength(graph, best_path)
    return best_path
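# Illustrative sketch (not part of the original file): a minimal getPathLength
# compatible with the functions above, assuming `graph` is a dict-of-dicts of
# pairwise distances, e.g. graph['a']['b'] = 3.0. The real project presumably
# provides its own implementation.
def getPathLength(graph, path):
    return sum(graph[a][b] for a, b in zip(path, path[1:]))
# Example: optimization2opt(graph, list(graph), len(graph)) returns a path at
# least as short as the initial ordering.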
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as onp
from jax.numpy import lax_numpy as np
from jax import core
from jax import lax
from jax.interpreters import xla
from jax.interpreters import ad
from jax.util import partial
from jax.abstract_arrays import ShapedArray
from jax.core import Primitive
from jax.lax import (standard_primitive, standard_unop, binop_dtype_rule,
_float, _complex, _input_dtype)
# traceables
def cholesky(x): return cholesky_p.bind(x)
def qr(x, full_matrices=True):
q, r = qr_p.bind(x, full_matrices=full_matrices)
return q, r
def triangular_solve(a, b, left_side=False, lower=False, transpose_a=False,
conjugate_a=False):
return triangular_solve_p.bind(
a, b, left_side=left_side, lower=lower, transpose_a=transpose_a,
conjugate_a=conjugate_a)
# utilities
def _T(x):
return np.swapaxes(x, -1, -2)
# primitives
def cholesky_jvp_rule(primals, tangents):
x, = primals
sigma_dot, = tangents
L = cholesky_p.bind(x)
# Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf
sigma_dot = (sigma_dot + _T(sigma_dot)) / 2
phi = lambda X: np.tril(X) / (1 + np.eye(x.shape[-1]))
tmp = triangular_solve(L, sigma_dot,
left_side=False, transpose_a=True, lower=True)
L_dot = lax.dot(L, phi(triangular_solve(
L, tmp, left_side=True, transpose_a=False, lower=True)))
return L, L_dot
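# Reference sketch (added; not from the original JAX source): a plain-NumPy
# restatement of the forward-mode rule above, handy for sanity-checking
# cholesky_jvp_rule against finite differences on a single (non-batched) SPD
# matrix. The helper name is illustrative only.
def _cholesky_jvp_reference(sigma, sigma_dot):
  L = onp.linalg.cholesky(sigma)
  sigma_dot = (sigma_dot + sigma_dot.T) / 2
  phi = lambda X: onp.tril(X) / (1 + onp.eye(sigma.shape[-1]))
  tmp = onp.linalg.solve(L, sigma_dot.T).T   # sigma_dot @ inv(L).T
  inner = onp.linalg.solve(L, tmp)           # inv(L) @ sigma_dot @ inv(L).T
  return L, onp.dot(L, phi(inner))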
cholesky_p = standard_unop(_float, 'cholesky')
ad.primitive_jvps[cholesky_p] = cholesky_jvp_rule
triangular_solve_dtype_rule = partial(
binop_dtype_rule, _input_dtype, (_float | _complex, _float | _complex),
'triangular_solve')
def triangular_solve_shape_rule(a, b, left_side=False, **unused_kwargs):
if a.ndim < 2:
msg = "triangular_solve requires a.ndim to be at least 2, got {}."
raise TypeError(msg.format(a.ndim))
if a.shape[-1] != a.shape[-2]:
msg = ("triangular_solve requires the last two dimensions of a to be equal "
"in size, got a.shape of {}.")
raise TypeError(msg.format(a.shape))
if a.shape[:-2] != b.shape[:-2]:
msg = ("triangular_solve requires both arguments to have the same number "
"of dimensions and equal batch dimensions, got {} and {}.")
raise TypeError(msg.format(a.shape, b.shape))
common_dim = -2 if left_side else -1
if a.shape[-1] != b.shape[common_dim]:
msg = "Incompatible shapes for arguments to triangular_solve: {} and {}."
raise TypeError(msg.format(a.shape, b.shape))
return b.shape
def triangular_solve_transpose_rule(
cotangent, a, b, left_side, lower, transpose_a, conjugate_a):
assert a is not None and b is None
cotangent_b = triangular_solve(a, cotangent, left_side, lower,
not transpose_a, conjugate_a)
return [None, cotangent_b]
triangular_solve_p = standard_primitive(
triangular_solve_shape_rule, triangular_solve_dtype_rule,
'triangular_solve')
ad.defjvp(triangular_solve_p,
None,
lambda g_b, a, b, **kwargs: triangular_solve(a, g_b, **kwargs))
ad.primitive_transposes[triangular_solve_p] = triangular_solve_transpose_rule
def qr_impl(operand, full_matrices):
q, r = xla.apply_primitive(qr_p, operand, full_matrices=full_matrices)
return core.pack((q, r))
def qr_translation_rule(c, operand, full_matrices):
return c.QR(operand, full_matrices=full_matrices)
def qr_abstract_eval(operand, full_matrices):
if isinstance(operand, ShapedArray):
if operand.ndim < 2:
raise ValueError("Argument to QR decomposition must have ndims >= 2")
batch_dims = operand.shape[:-2]
m = operand.shape[-2]
n = operand.shape[-1]
k = m if full_matrices else min(m, n)
q = ShapedArray(batch_dims + (m, k), operand.dtype)
r = ShapedArray(batch_dims + (k, n), operand.dtype)
else:
q = operand
r = operand
return core.AbstractTuple((q, r))
def qr_dtype_rule(operand, full_matrices=True):
return operand.dtype
qr_p = Primitive('qr')
qr_p.def_impl(qr_impl)
qr_p.def_abstract_eval(qr_abstract_eval)
xla.translations[qr_p] = qr_translation_rule
|
import senteval
config = {
'classifier': 'log reg',
'nhid': 0,
'optim': 'adam',
'batch_size': 64,
'tenacity': 5,
'epoch_size': 4
}
def run_log_reg(params_senteval, batcher, prepare, probing_tasks):
classifier_config = {**params_senteval['classifier'], **config}
params_senteval['classifier'] = classifier_config
se = senteval.engine.SE(params_senteval, batcher, prepare)
results = se.eval(probing_tasks)
return results
|
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
    def serialize(self, root):
        serial = ['']
        def recurse(node):
            if node is None:
                serial[0] += "% "  # '%' marks a missing child
            else:
                serial[0] += str(node.val) + " "
                recurse(node.left)
                recurse(node.right)
        recurse(root)
        return serial[0]
    def deserialize(self, data):
        values = data.split(' ')
        def recurse():
            val = values.pop(0)
            if val == '%':
                return None
            cur = TreeNode(int(val))  # node values are assumed to be integers
            cur.left = recurse()
            cur.right = recurse()
            return cur
        return recurse()
s = Codec()
One = TreeNode(1)
Two = TreeNode(2)
Three = TreeNode(3)
Four = TreeNode(4)
Five = TreeNode(5)
One.left = Two
One.right = Three
Three.left = Four
Three.right = Five
r = (s.deserialize(s.serialize(One)))
print (r.val)
print(r.left.val)
print(r.right.val)
print(r.right.left.val)
print(r.right.right.val)
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
import serial
import json
import time
import binascii
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
class SerialChannels:
def __init__(self, CHANNELS):
self.channels = []
self.numChannels = CHANNELS - 1
self.frame_error = 0
self.failsafe = 0
class SerialRCReceiver(Node):
def __init__(self):
super().__init__('serial_rc_receiver')
self.serial = serial.Serial() #I think this is safe to remove
self.publisher_ = self.create_publisher(String, 'serial_rc', 10)
        timer_period = 0.01  # seconds; may need tuning to match how fast the RC decoding runs
self.timer = self.create_timer(timer_period, self.timer_callback)
#RC Variables
self.BUFFER_LENGTH = 25
self.CHANNELS = 18
self.MAX_READ_ATTEMPTS = 32
self.buffer = []
self.bufferOffset = 0
self.JetsonSerial = serial.Serial(
port="/dev/ttyTHS1", # RX terminal THS1 (Port 10 on J41 Header)
baudrate=95000,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_TWO,
)
self.serialData = SerialChannels(self.CHANNELS)
time.sleep(1)#let port initialize
def timer_callback(self):
#get inputs
self.receive()
msg = String()
msg.data = json.dumps(self.readLineToJson())
self.publisher_.publish(msg)
self.get_logger().info('Publishing: "%s"' % msg.data)
def readLineToJson(self):
if self.serialData.channels:
return {"state1": self.serialData.channels[0],
"ballast": self.serialData.channels[1],
"rudder": self.serialData.channels[2],
"manual": self.serialData.channels[4],
"state2": self.serialData.channels[5]}
        else:  # fall back to fixed neutral channel values when no data is available
return {"state1": 0,
"ballast": 892,
"rudder": 934,
"manual": 1130,
"state2": 992}
def receive(self):
data = 0
counter = 0
while self.JetsonSerial.in_waiting and (self.bufferOffset < self.BUFFER_LENGTH) and (counter < self.MAX_READ_ATTEMPTS):
data = int(self.JetsonSerial.read().hex(), 16)
if self.bufferOffset == 0 and data != 0x0f:
data = 0
continue
self.buffer.append(data & 0xff)
self.bufferOffset += 1
if (self.bufferOffset == self.BUFFER_LENGTH):
self.JetsonSerial.reset_input_buffer()
                if self.decodeSBUS():
                    if self.serialData.failsafe:
                        self.serialData.channels = []
                        print("Failsafe active")
                    elif self.serialData.frame_error:
                        self.serialData.channels = []
                        print("Frame error")
                    else:
                        print("Successful decode")
self.buffer.clear()
self.bufferOffset = 0
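    # SBUS frame layout assumed by decodeSBUS below: 25 bytes per frame --
    # byte 0 is the 0x0F start byte, bytes 1-22 pack 16 channels of 11 bits
    # each, byte 23 holds the flag bits (bit 2 = frame error, bit 3 = failsafe)
    # and byte 24 is the 0x00 end byte.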
def decodeSBUS(self):
if (self.buffer[0] != 0x0f):
print("Incorrect start bit")
return False
if (self.buffer[self.BUFFER_LENGTH - 1] != 0x00):
print("Incorrect stop bit")
return False
# print("good bit")
# print(buffer)
self.serialData.channels = []
dataChannels = self.serialData.channels
dataChannels.append((self.buffer[1] | self.buffer[2] << 8) & 0x07FF) # Channel 0
dataChannels.append((self.buffer[2] >> 3 | self.buffer[3] << 5) & 0x07FF) # Channel 1
dataChannels.append((self.buffer[3] >> 6 | self.buffer[4] << 2 | self.buffer[5] << 10) & 0x07FF) # Channel 2
dataChannels.append((self.buffer[5] >> 1 | self.buffer[6] << 7) & 0x07FF) # Channel 3
dataChannels.append((self.buffer[6] >> 4 | self.buffer[7] << 4) & 0x07FF) # Channel 4
dataChannels.append((self.buffer[7] >> 7 | self.buffer[8] << 1 | self.buffer[9] << 9) & 0x07FF) # Channel 5
dataChannels.append((self.buffer[9] >> 2 | self.buffer[10] << 6) & 0x07FF) # Channel 6
dataChannels.append((self.buffer[10] >> 5 | self.buffer[11] << 3) & 0x07FF) # Channel 7
dataChannels.append((self.buffer[12] | self.buffer[13] << 8) & 0x07FF) # Channel 8
dataChannels.append((self.buffer[13] >> 3 | self.buffer[14] << 5) & 0x07FF) # Channel 9
dataChannels.append((self.buffer[14] >> 6 | self.buffer[15] << 2 | self.buffer[16] << 10) & 0x07FF) # Channel 10
dataChannels.append((self.buffer[16] >> 1 | self.buffer[17] << 7) & 0x07FF) # Channel 11
dataChannels.append((self.buffer[17] >> 4 | self.buffer[18] << 4) & 0x07FF) # Channel 12
dataChannels.append((self.buffer[18] >> 7 | self.buffer[19] << 1 | self.buffer[20] << 9) & 0x07FF) # Channel 13
dataChannels.append((self.buffer[20] >> 2 | self.buffer[21] << 6) & 0x07FF) # Channel 14
dataChannels.append((self.buffer[21] >> 5 | self.buffer[22] << 3) & 0x07FF) # Channel 15
self.serialData.frame_error = (self.buffer[23] & (1 << 2)) != 0
self.serialData.failsafe = (self.buffer[23] & (1 << 3)) != 0
return True
def main(args=None):
rclpy.init(args=args)
serial_rc_receiver = SerialRCReceiver()
rclpy.spin(serial_rc_receiver)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
serial_rc_receiver.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
# Generated by Django 2.1.2 on 2018-11-09 21:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activity_id', models.IntegerField(null=True)),
('name', models.CharField(max_length=255, null=True)),
('type', models.CharField(max_length=30)),
('ride_date', models.DateTimeField()),
('distance', models.FloatField(null=True)),
('average_speed', models.FloatField(null=True)),
('average_watts', models.FloatField(null=True)),
('average_heartrate', models.FloatField(null=True)),
('max_heartrate', models.IntegerField(null=True)),
],
),
migrations.CreateModel(
name='ActivityStream',
fields=[
('activity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='core.Activity')),
('time', models.FileField(upload_to='activities')),
('latlng', models.FileField(upload_to='activities')),
('distance', models.FileField(upload_to='activities')),
('altitude', models.FileField(upload_to='activities')),
                ('velocity_smooth', models.FileField(upload_to='activities')),
('heartrate', models.FileField(upload_to='activities')),
                ('cadence', models.FileField(upload_to='activities')),
('watts', models.FileField(upload_to='activities')),
('temp', models.FileField(upload_to='activities')),
('moving', models.FileField(upload_to='activities')),
('grade_smooth', models.FileField(upload_to='activities')),
],
),
migrations.AddField(
model_name='activity',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
from requests import get
from websocket import create_connection
from json import dumps, loads
from sys import argv
from os import listdir, path
from time import sleep
BASE_ADDRESS = "http://localhost:8080"
class Tab:
def __init__(self, res) -> None:
self.title = res["title"]
self.id = res["id"]
self.ws_url = res["webSocketDebuggerUrl"]
def evaluate_js(self, js):
ws = create_connection(self.ws_url)
ws.send(dumps({
"id": 1,
"method": "Runtime.evaluate",
"params": {
"expression": js,
"userGesture": True
}
}))
response = ws.recv()
ws.close()
return response
def __str__(self):
return self.title
def __repr__(self):
return self.title
def get_tabs():
while True:
try:
res = get("{}/json".format(BASE_ADDRESS)).json()
return [Tab(i) for i in res]
except Exception as e:
print("Could not fetch tabs from Steam CEF instance. Are you sure steam is running ?")
print(e)
print("Retrying in 5 seconds")
sleep(5)
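# Expected plugin layout, inferred from main() below (names are illustrative):
# argv[1] is a directory of plugin folders, each containing a main.js whose
# first line is a JSON config behind a line comment, for example
#   // {"enabled": true, "target_tab": "Steam Big Picture Mode"}
# and whose remaining contents are the JavaScript injected into that tab.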
def main():
tabs = get_tabs()
files = listdir(argv[1])
for file in files:
fp = open(path.join(argv[1], file, "main.js"), "r")
cfg = loads(fp.readline()[2:])
if not cfg.get("enabled"):
continue
print("Loading {} with cfg {}".format(file, cfg))
while True:
tab = next((i for i in tabs if i.title == cfg["target_tab"]), None)
if tab:
print("Found tab {} with ID {}. Injecting JS...".format(tab.title, tab.id))
print(tab.evaluate_js(fp.read()))
break
else:
print("Target tab {} not found in fetched tabs. Refreshing tabs and retrying in 5 seconds".format(cfg["target_tab"]))
tabs = get_tabs()
sleep(5)
if __name__ == "__main__":
main()
|
"""Convert logincreate timestamps to TIMESTAMP(timzone=True)
Revision ID: 3accc3d526ba
Revises: 2e25bc9a0896
Create Date: 2020-02-27 16:06:48.018000
"""
# revision identifiers, used by Alembic.
revision = '3accc3d526ba'
down_revision = '2e25bc9a0896'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
from sqlalchemy.dialects.postgresql import TIMESTAMP
def upgrade():
op.alter_column('logincreate', 'unixtime',
existing_type=sa.INTEGER(),
server_default=sa.func.now(),
type_=TIMESTAMP(timezone=True),
existing_nullable=False,
postgresql_using="to_timestamp(unixtime + 18000)",
new_column_name='created_at')
def downgrade():
op.alter_column('logincreate', 'created_at', server_default=None)
op.alter_column('logincreate', 'created_at',
existing_type=TIMESTAMP(timezone=True),
type_=sa.INTEGER(),
existing_nullable=False,
postgresql_using="extract(epoch from created_at) - 18000",
new_column_name='unixtime')
|
# # # # # a = 215
# # # # a = int(input("Input a"))
# # # a = 9000
# # # a = 3
# # #
# if True: # after : is the code block, must be indented
# print("True")
# print("This always runs because if statement is True")
# print("Still working in if block")
# # # if block has ended
# print("This runs no matter what because we are outside if ")
# # # # # after we go back to our normal indentation the if block is ended
# # # #
# a = 25
# if a > 10: # in Python when you see : next line will be indented
# # runs only when statement after if is True
# print("Do this when a is larger than 10")
# print(f"Still only runs when a > {a}")
# # we can keep doing things when a > 10 here
# # #
# # # # # here we have exited if
# print("This will always print no matter what")
# # # # # # # #
# # # # # # #
# # # # # # a = -333
# # # # # # a = 200
# a = 44
# a = 15
# if a > 10: # in Python when you see : next line will be indented
# # runs only when statement after if is True
# print("Again Do this when a is larger than 10")
# print("Indeed a is", a)
# else: # when a is <= 10
# print("Only when a is less or equal to 10")
# print("Indeed a is only", a)
# # we could do more stuff here when a is not larger than 10
# # # #
# # # # # # a = 10
a = 200
a = -95
a = 10
# # a = -355
# if we need more than 2 distinct paths
# if a > 10: # in Python when you see : next line will be indented
# # runs only when statement after if is True
# print("Again Do this when a is larger than 10", a)
# elif a < 10:
# print("ahh a is less than 10", a)
# else: # so a must be 10 no other choices you do not need to check, after all other choices are exhausted
# print("Only when a is equal to 10 since we checked other cases", a)
# # we could do more stuff here when a is not larger than 10
# # # # # # #
# print("Back to normal program flow which always runs no matter what a is")
# # # # # #
# # # # # #
# # # # # # #
# # # # without else both of these could run
# a = 20
# # # a = 7
# if a > 5:
# print("a is larger than 5")
# # the below if statement is not related to the if statement above
# if a > 10:
# print("a is larger than 10")
# # # # # #
# # # # # #
# # # # # #
# # # # # #
# # # # # #
# # # # # #
# # # # # #
# # # # # #
# # # # # # # if else elif
# # # # # a = 190
# a = int(input("give me an a! "))
# if a > 10:
# print("a is larger than 10")
# print("This will only happen when a > 10")
# if a >= 200: # so we can nest ifs inside another if
# print("a is truly big over or equal 200")
# else:
# print("a is more than 10 but no more than 199")
# elif a < 10:
# print("a is less than 10", a)
# else: # if a == 10
# print("a is equal to 10", a)
# # #
# # # # print("This will always happen no matter the a value")
# # # #
# # # # b = -8500
# # # # b = 6
# # # # b = 555
# # # # b = 9000
# # # # if b < 0:
# # # # print("Less than 0", b)
# # # # elif b < 10:
# # # # print("Less than 10 but more or equal to 0", b)
# # # # elif b < 9000:
# # # # pass # empty operation
# # # # # print("At least 10 but less than 9000", b)
# # # # else:
# # # # print("9000 or more!", b)
# # # # #
# # # # if b < 0:
# # # # print("Less than 0", b)
# # # #
# # # # if b < 10:
# # # # print("Less than 10", b)
# # # #
# # # # if b < 9000:
# # # # print("less than 9000", b)
# # # # else:
# # # # print("9000 or more!", b)
# # # # # #
# # # # c = None
# # # # c = 5
# # # # if c == None:
# # # # print("There is Nothing")
# # # # else:
# # # # print("There is something")
# # # # # #
# # # #
a = -100
if 2 < 3 < 8 < a:
print(f"2 < 3 < 8 < {a} is it a True statement? ", 2 < 3 < 8 < a)
else:
print(f"2 < 3 < 8 < {a} is it a True statement?", 2 < 3 < 8 < a)
|
import os
import sys
ret = []
# collect the base names of all .py files under the last sys.path entry
# (os.walk accepts no filename pattern, so filter on the extension instead)
for dirpath, dirnames, filenames in os.walk(sys.path[-1]):
    for name in filenames:
        base, ext = os.path.splitext(name)
        if ext == '.py':
            ret.append(base)
print(ret)
|
# -*- coding: utf-8 -*-
import numpy as np
import hmm_backward, hmm_forward
def hmm_kesai(alpha, beta, A, B, Q):
    '''
    Compute xi: the probability that the hidden state is s_i at time t and s_j
    at time t+1, given the observation sequence.
    :param alpha: forward probability matrix
    :param beta: backward probability matrix
    :param A: state transition matrix
    :param B: state-to-observation (emission) probability matrix
    :param Q: observation sequence
    :return: the ξ matrix of shape (T-1, N, N)
    '''
    N = alpha.shape[1]
    T = alpha.shape[0]
    kesai = np.zeros((T-1, N, N))
    for t in np.arange(T-1):
        # normalization term: the sum of alpha * A * B * beta over all (i, j)
        denom = 0
        for i in np.arange(N):
            for j in np.arange(N):
                denom += alpha[t][i] * A[i][j] * B[j][Q[t + 1]] * beta[t + 1][j]
        for i in np.arange(N):
            for j in np.arange(N):
                kesai[t][i][j] = (alpha[t][i] * A[i][j] * B[j][Q[t + 1]] * beta[t + 1][j]) / denom
    return kesai
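# Illustrative addition (not in the original file): the state-occupancy
# probabilities gamma used in Baum-Welch re-estimation can be recovered from
# the xi matrix by summing over the next state, i.e.
# gamma[t][i] = sum_j kesai[t][i][j] for t = 0 .. T-2.
def hmm_gamma_from_kesai(kesai):
    return kesai.sum(axis=2)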
if __name__ == '__main__':
pi = np.array([0.2, 0.5, 0.3])
A = np.array([
[0.5, 0.4, 0.1],
[0.2, 0.2, 0.6],
[0.2, 0.5, 0.3]
])
B = np.array([
[0.4, 0.6],
[0.8, 0.2],
[0.5, 0.5]
])
Q = [0, 1, 0, 0, 1]
beta, _ = hmm_backward.hmm_backward(pi, A, B, Q)
alpha, _ = hmm_forward.hmm_forward(pi, A, B, Q)
    kesai = hmm_kesai(alpha, beta, A, B, Q)
print('ξ={}'.format(kesai))
|
from typing import Type, Dict
class SsaContext():
"""
    :ivar objCnt: counter used to generate unique object names (a per-class
        Dict[Type, int] in the commented-out variant)
"""
def __init__(self):
self.objCnt = 0 # : Dict[Type, int] = {}
def genName(self, obj):
prefix = getattr(obj, "GEN_NAME_PREFIX", "o")
i = self.objCnt # .get(obj.__class__, 0)
self.objCnt = i + 1 # [obj.__class__] = i
return f"{prefix}{i:d}"
|
import PyPDF2
from PyPDF2 import PdfFileReader
from pprint import pprint
from tkinter import filedialog
from tkinter import *
def fileName():
root = Tk()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("pdf files","*.pdf"),("all files","*.*")))
return str(root.filename)
def walk(obj, fnt, emb):
'''
We create and add to two sets, fnt = fonts used and emb = fonts embedded.
'''
if not hasattr(obj, 'keys'):
return None, None
fontkeys = set(['/FontFile', '/FontFile2', '/FontFile3'])
if '/BaseFont' in obj:
fnt.add(obj['/BaseFont'])
if '/FontName' in obj:
if [x for x in fontkeys if x in obj]:# test to see if there is FontFile
emb.add(obj['/FontName'])
for k in obj.keys():
walk(obj[k], fnt, emb)
return fnt, emb# return the sets for each page
def getFont():
fname = fileName()
pdf = PdfFileReader(fname)
fonts = set()
embedded = set()
for page in pdf.pages:
obj = page.getObject()
# updated via this answer:
# https://stackoverflow.com/questions/60876103/use-pypdf2-to-detect-non-embedded-fonts-in-pdf-file-generated-by-google-docs/60895334#60895334
# in order to handle lists inside objects. Thanks misingnoglic !
# untested code since I don't have such a PDF to play with.
if type(obj) == PyPDF2.generic.ArrayObject: # You can also do ducktyping here
for i in obj:
if hasattr(i, 'keys'):
                    f, e = walk(i, fonts, embedded)
fonts = fonts.union(f)
embedded = embedded.union(e)
else:
f, e = walk(obj['/Resources'], fonts, embedded)
fonts = fonts.union(f)
embedded = embedded.union(e)
unembedded = fonts - embedded
print ('Font List')
pprint(sorted(list(fonts)))
if unembedded:
print ('Unembedded Fonts')
pprint(unembedded)
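# Illustrative entry point (assumption; the original file defines none):
if __name__ == '__main__':
    getFont()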
|
import unittest
import numpy as np
from src.davil.lists import partition_train_test_validation
class TestPartitions(unittest.TestCase):
def test(self):
def check_sets(lists):
for ts in zip(*lists):
for t in ts:
t0 = ts[0].split('_')[0]
assert t0 == t.split('_')[0]
xs = [np.arange(20) for _ in range(4)]
chars = [chr(x) for x in range(97, 101, 1)]
xs = [[str(xx) + '_' + c for xx in x] for x, c in zip(xs, chars)]
trs, tes, vals = partition_train_test_validation(xs)
check_sets(trs)
check_sets(tes)
check_sets(vals)
assert len(trs[0]) == round(20 * 0.7)
assert len(tes[0]) == round(20 * 0.1)
assert len(vals[0]) == round(20 * 0.2)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 11:12:06 2021
@author: altair
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
df = pd.read_csv("NSE-TATAGLOBAL11.csv")
print(df.head())
# analyze data in proper date time
df["Date"] = pd.to_datetime(df.Date, format = "%Y-%m-%d")
df.index = df['Date']
plt.figure(figsize=(16,8))
plt.plot(df["Close"], label = "Close Price History")
#implementation
# linear regression
# setting index as date values
df['Date'] = pd.to_datetime(df.Date, format= '%Y-%m-%d')
df.index = df['Date']
# sorting
data = df.sort_index(ascending= True, axis= 0)
data['Date'] = pd.to_numeric(pd.to_datetime(data['Date']))
# creating a separate dataset
new_data = pd.DataFrame(index= range(0, len(df)), columns= ['Date', 'Close'])
for i in range(0,len(data)):
new_data['Date'][i] = data['Date'][i]
new_data['Close'][i] = data['Close'][i]
#new_data['mon_fri'] = 0
#for i in range(0,len(new_data)):
# if (new_data['Dayofweek'][i] == 0 or new_data['Dayofweek'][i] == 4):
# new_data['mon_fri'][i] = 1
# else:
# new_data['mon_fri'][i] = 0
from fastai.tabular.all import *
add_datepart(new_data, 'Date')
new_data.drop('Elapsed', axis= 1, inplace= True) # elapsed will be the time stamp
# split into train and validation
train = new_data[:987]
valid = new_data[987:]
x_train = train.drop('Close', axis= 1)
y_train = train['Close']
x_valid = valid.drop('Close', axis= 1)
y_valid = valid['Close']
# implement linear regression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train, y_train)
# results
# make predictions and find the RMSE
preds = model.predict(x_valid)
rms = np.sqrt(np.mean(np.power((np.array(y_valid)-np.array(preds)),2)))
print('RMS:', rms)
# plot
valid.loc[:, 'Predictions'] = preds  # assign via .loc to avoid a pandas SettingWithCopyWarning
valid.index = new_data[987:].index
train.index = new_data[:987].index
plt.plot(train['Close'])
plt.plot(valid[['Close','Predictions']])
# knn
from sklearn import neighbors
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
#scaling data
x_train_scaled = scaler.fit_transform(x_train)
x_train = pd.DataFrame(x_train_scaled)
x_valid_scaled = scaler.fit_transform(x_valid)
x_valid = pd.DataFrame(x_valid_scaled)
# using gridsearch to find the best parameter
params = {'n_neighbors' : [2, 3, 4, 5, 6, 7, 8, 9]}
knn = neighbors.KNeighborsRegressor()
model = GridSearchCV(knn, params, cv=5)
# fit the model and make predictions
model.fit(x_train, y_train)
preds = model.predict(x_valid)
#rmse
rms = np.sqrt(np.mean(np.power((np.array(y_valid)-np.array(preds)),2)))
print('RMS:', rms)
# plots
#valid['Predictions'] = 0
#valid['Predictions'] = preds
valid.loc[:, 'Predictions'] = preds
plt.plot(train['Close'])
plt.plot(valid[['Close','Predictions']])
|