# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
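# These tests exercise airflow.gcp.hooks.spanner.SpannerHook against mocked
# Cloud Spanner clients: TestGcpSpannerHookDefaultProjectId covers a connection
# that provides a default project_id, while TestGcpSpannerHookNoDefaultProjectID
# covers the case where project_id must be passed explicitly.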
import unittest
from airflow import AirflowException
from airflow.gcp.hooks.spanner import SpannerHook
from tests.compat import PropertyMock, mock
from tests.gcp.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST, mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
SPANNER_INSTANCE = 'instance'
SPANNER_CONFIGURATION = 'configuration'
SPANNER_DATABASE = 'database-name'
class TestGcpSpannerHookDefaultProjectId(unittest.TestCase):
def setUp(self):
with mock.patch('airflow.gcp.hooks.base.CloudBaseHook.__init__',
new=mock_base_gcp_hook_default_project_id):
self.spanner_hook_default_project_id = SpannerHook(gcp_conn_id='test')
@mock.patch("airflow.gcp.hooks.spanner.SpannerHook.client_info", new_callable=mock.PropertyMock)
@mock.patch("airflow.gcp.hooks.spanner.SpannerHook._get_credentials")
@mock.patch("airflow.gcp.hooks.spanner.Client")
def test_spanner_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.spanner_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.spanner_hook_default_project_id._client, result)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_existing_instance(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_instance(instance_id=SPANNER_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
self.assertIsNotNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_existing_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_instance(instance_id=SPANNER_INSTANCE,
project_id='new-project')
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(instance_id='instance')
self.assertIsNotNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
create_method = instance_method.return_value.create
create_method.return_value = False
res = self.spanner_hook_default_project_id.create_instance(
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=1,
display_name=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(
instance_id='instance',
configuration_name='configuration',
display_name='database-name',
node_count=1)
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
create_method = instance_method.return_value.create
create_method.return_value = False
res = self.spanner_hook_default_project_id.create_instance(
project_id='new-project',
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=1,
display_name=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(
instance_id='instance',
configuration_name='configuration',
display_name='database-name',
node_count=1)
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
update_method = instance_method.return_value.update
update_method.return_value = False
res = self.spanner_hook_default_project_id.update_instance(
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=2,
display_name=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(
instance_id='instance', configuration_name='configuration', display_name='database-name',
node_count=2)
update_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
update_method = instance_method.return_value.update
update_method.return_value = False
res = self.spanner_hook_default_project_id.update_instance(
project_id='new-project',
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=2,
display_name=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(
instance_id='instance', configuration_name='configuration', display_name='database-name',
node_count=2)
update_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
delete_method.return_value = False
res = self.spanner_hook_default_project_id.delete_instance(
instance_id=SPANNER_INSTANCE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(
'instance')
delete_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
delete_method.return_value = False
res = self.spanner_hook_default_project_id.delete_instance(
project_id='new-project',
instance_id=SPANNER_INSTANCE)
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(
'instance')
delete_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
        database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_exists_method.assert_called_once_with()
self.assertIsNotNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
        database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_database(
project_id='new-project',
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_exists_method.assert_called_once_with()
self.assertIsNotNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_create_method = database_method.return_value.create
res = self.spanner_hook_default_project_id.create_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name', ddl_statements=[])
database_create_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_create_method = database_method.return_value.create
res = self.spanner_hook_default_project_id.create_database(
project_id='new-project',
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name', ddl_statements=[])
database_create_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_update_ddl_method = database_method.return_value.update_ddl
res = self.spanner_hook_default_project_id.update_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_update_ddl_method = database_method.return_value.update_ddl
res = self.spanner_hook_default_project_id.update_database(
project_id='new-project',
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_drop_method = database_method.return_value.drop
database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.delete_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_exists_method.assert_called_once_with()
database_drop_method.assert_called_once_with()
self.assertTrue(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_drop_method = database_method.return_value.drop
database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.delete_database(
project_id='new-project',
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_exists_method.assert_called_once_with()
database_drop_method.assert_called_once_with()
self.assertTrue(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_execute_dml(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
run_in_transaction_method = database_method.return_value.run_in_transaction
res = self.spanner_hook_default_project_id.execute_dml(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
queries='')
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
run_in_transaction_method.assert_called_once_with(mock.ANY)
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_execute_dml_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
run_in_transaction_method = database_method.return_value.run_in_transaction
res = self.spanner_hook_default_project_id.execute_dml(
project_id='new-project',
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
queries='')
get_client.assert_called_once_with(project_id='new-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
run_in_transaction_method.assert_called_once_with(mock.ANY)
self.assertIsNone(res)
class TestGcpSpannerHookNoDefaultProjectID(unittest.TestCase):
def setUp(self):
with mock.patch('airflow.gcp.hooks.base.CloudBaseHook.__init__',
new=mock_base_gcp_hook_no_default_project_id):
self.spanner_hook_no_default_project_id = SpannerHook(gcp_conn_id='test')
@mock.patch(
"airflow.gcp.hooks.spanner.SpannerHook.client_info",
new_callable=mock.PropertyMock
)
@mock.patch(
"airflow.gcp.hooks.spanner.SpannerHook._get_credentials",
return_value="CREDENTIALS"
)
@mock.patch("airflow.gcp.hooks.spanner.Client")
def test_spanner_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.spanner_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.spanner_hook_no_default_project_id._client, result)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_existing_instance_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.get_instance(instance_id=SPANNER_INSTANCE)
get_client.assert_not_called()
instance_method.assert_not_called()
instance_exists_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_existing_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.spanner_hook_no_default_project_id.get_instance(instance_id=SPANNER_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
self.assertIsNotNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_non_existing_instance(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = False
res = self.spanner_hook_no_default_project_id.get_instance(instance_id=SPANNER_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_instance_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
create_method = instance_method.return_value.create
create_method.return_value = False
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.create_instance(
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=1,
display_name=SPANNER_DATABASE)
        get_client.assert_not_called()
instance_method.assert_not_called()
create_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
create_method = instance_method.return_value.create
create_method.return_value = False
res = self.spanner_hook_no_default_project_id.create_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=1,
display_name=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(
instance_id='instance',
configuration_name='configuration',
display_name='database-name',
node_count=1)
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_instance_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
update_method = instance_method.return_value.update
update_method.return_value = False
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.update_instance(
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=2,
display_name=SPANNER_DATABASE)
get_client.assert_not_called()
instance_method.assert_not_called()
update_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
update_method = instance_method.return_value.update
update_method.return_value = False
res = self.spanner_hook_no_default_project_id.update_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=2,
display_name=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(
instance_id='instance', configuration_name='configuration', display_name='database-name',
node_count=2)
update_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_instance_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
delete_method.return_value = False
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.delete_instance(
instance_id=SPANNER_INSTANCE)
get_client.assert_not_called()
instance_method.assert_not_called()
delete_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
delete_method.return_value = False
res = self.spanner_hook_no_default_project_id.delete_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(
'instance')
delete_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_database_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
        database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.get_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_not_called()
instance_method.assert_not_called()
database_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_get_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
        database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_no_default_project_id.get_database(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_exists_method.assert_called_once_with()
self.assertIsNotNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_database_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_create_method = database_method.return_value.create
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.create_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_not_called()
instance_method.assert_not_called()
database_method.assert_not_called()
database_create_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_create_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_create_method = database_method.return_value.create
res = self.spanner_hook_no_default_project_id.create_database(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name', ddl_statements=[])
database_create_method.assert_called_once_with()
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_database_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
        database_update_ddl_method = database_method.return_value.update_ddl
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.update_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_not_called()
instance_method.assert_not_called()
database_method.assert_not_called()
        database_update_ddl_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_update_ddl_method = database_method.return_value.update_ddl
res = self.spanner_hook_no_default_project_id.update_database(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[])
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_update_database_overridden_project_id_and_operation(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_update_ddl_method = database_method.return_value.update_ddl
res = self.spanner_hook_no_default_project_id.update_database(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
operation_id="operation",
ddl_statements=[])
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id="operation")
self.assertIsNone(res)
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_database_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_drop_method = database_method.return_value.drop
database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.delete_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_not_called()
instance_method.assert_not_called()
database_method.assert_not_called()
database_exists_method.assert_not_called()
database_drop_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_drop_method = database_method.return_value.drop
database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_no_default_project_id.delete_database(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_exists_method.assert_called_once_with()
database_drop_method.assert_called_once_with()
self.assertTrue(res)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_delete_database_missing_database(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_drop_method = database_method.return_value.drop
database_exists_method = database_method.return_value.exists
database_exists_method.return_value = False
self.spanner_hook_no_default_project_id.delete_database(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE)
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
database_exists_method.assert_called_once_with()
database_drop_method.assert_not_called()
@mock.patch(
'airflow.gcp.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_execute_dml_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
run_in_transaction_method = database_method.return_value.run_in_transaction
with self.assertRaises(AirflowException) as cm:
self.spanner_hook_no_default_project_id.execute_dml(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
queries='')
get_client.assert_not_called()
instance_method.assert_not_called()
database_method.assert_not_called()
run_in_transaction_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.spanner.SpannerHook._get_client')
def test_execute_dml_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
run_in_transaction_method = database_method.return_value.run_in_transaction
res = self.spanner_hook_no_default_project_id.execute_dml(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
queries='')
get_client.assert_called_once_with(project_id='example-project')
instance_method.assert_called_once_with(instance_id='instance')
database_method.assert_called_once_with(database_id='database-name')
run_in_transaction_method.assert_called_once_with(mock.ANY)
self.assertIsNone(res)
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
# Copyright 2013 Thomas Calmant
import os
import sys
__all__ = ['getDefaultJVMPath',
'JVMNotFoundException', 'JVMNotSupportedException']
try:
import winreg
except ImportError:
winreg = None
class JVMNotFoundException(ValueError):
""" Exception raised when no JVM was found in the search path.
    This exception is raised when none of the places searched contained a
    JVM. The locations searched depend on the machine architecture. To avoid
    this exception, set the JAVA_HOME environment variable to a valid JRE or
    JDK root directory.
"""
pass
class JVMNotSupportedException(ValueError):
""" Exception raised when the JVM is not supported.
    This exception is raised when a search found a valid Java home directory,
    but the JVM shared library in it is not supported. Typically this occurs
    when the JVM does not match the architecture of Python (32 vs 64 bit), or
    the JVM is older than the version used to compile JPype.
"""
pass
def getDefaultJVMPath():
"""
Retrieves the path to the default or first found JVM library
Returns:
The path to the JVM shared library file
Raises:
JVMNotFoundException: If there was no JVM found in the search path.
        JVMNotSupportedException: If a JVM was found but is not compatible
            with Python due to CPU architecture.
"""
if sys.platform == "win32":
finder = WindowsJVMFinder()
elif sys.platform == "darwin":
finder = DarwinJVMFinder()
else:
finder = LinuxJVMFinder()
return finder.get_jvm_path()
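# Illustrative usage: the resolved path is typically what callers hand to
# jpype.startJVM, e.g.
#
#   import jpype
#   jvm_path = getDefaultJVMPath()
#   jpype.startJVM(jvm_path)
#
# Setting JAVA_HOME beforehand steers the search towards a specific
# installation, since that is the first location the finders consult.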
class JVMFinder(object):
"""
JVM library finder base class
"""
def __init__(self):
"""
Sets up members
"""
# Library file name
self._libfile = "libjvm.so"
# Predefined locations
self._locations = ("/usr/lib/jvm", "/usr/java")
# Search methods
self._methods = (self._get_from_java_home,
self._get_from_known_locations)
def find_libjvm(self, java_home):
"""
        Recursively looks for the JVM library file (self._libfile) under the
        given folder.
        Parameters:
            java_home(str): A Java home folder to search
Returns:
The first found file path, or None
"""
non_supported_jvm = ('cacao', 'jamvm')
found_non_supported_jvm = False
# Look for the file
for root, _, names in os.walk(java_home):
if self._libfile in names:
# Found it, but check for non supported jvms
candidate = os.path.split(root)[1]
if candidate in non_supported_jvm:
found_non_supported_jvm = True
continue # maybe we will find another one?
return os.path.join(root, self._libfile)
if found_non_supported_jvm:
raise JVMNotSupportedException("Sorry '{0}' is known to be "
"broken. Please ensure your "
"JAVA_HOME contains at least "
"another JVM implementation "
"(eg. server)"
.format(candidate))
# File not found
raise JVMNotFoundException("Sorry no JVM could be found. "
"Please ensure your JAVA_HOME "
"environment variable is pointing "
"to correct installation.")
def find_possible_homes(self, parents):
"""
        Generator that looks for first-level child folders that could be
        Java installations, according to their name.
Parameters:
parents (str[]): A list of parent directories
        Yields:
            The possible JVM installation folders
"""
homes = []
java_names = ('jre', 'jdk', 'java')
for parent in parents:
# Fast exit if folder does not exist
if not os.path.exists(parent):
continue
for childname in sorted(os.listdir(parent)):
# Compute the real path
path = os.path.realpath(os.path.join(parent, childname))
if path in homes or not os.path.isdir(path):
# Already known path, or not a directory -> ignore
continue
# Check if the path seems OK
real_name = os.path.basename(path).lower()
for java_name in java_names:
if java_name in real_name:
# Correct JVM folder name
homes.append(path)
yield path
break
def check(self, jvm):
"""
Check if the jvm is valid for this architecture.
        This method should be overridden for each architecture.
Raises:
JVMNotSupportedException: If the jvm is not supported.
"""
pass
def get_jvm_path(self):
"""
Retrieves the path to the default or first found JVM library
Returns:
The path to the JVM shared library file
Raises:
            ValueError: No JVM library found, or no supported JVM found
"""
jvm_notsupport_ext = None
for method in self._methods:
try:
jvm = method()
# If found check the architecture
if jvm:
self.check(jvm)
except NotImplementedError:
# Ignore missing implementations
pass
except JVMNotFoundException:
# Ignore not successful methods
pass
except JVMNotSupportedException as e:
jvm_notsupport_ext = e
else:
if jvm is not None:
return jvm
if jvm_notsupport_ext is not None:
raise jvm_notsupport_ext
raise JVMNotFoundException("No JVM shared library file ({0}) "
"found. Try setting up the JAVA_HOME "
"environment variable properly."
.format(self._libfile))
def _get_from_java_home(self):
"""
Retrieves the Java library path according to the JAVA_HOME environment
variable
Returns:
The path to the JVM library, or None
"""
# Get the environment variable
java_home = os.getenv("JAVA_HOME")
if java_home and os.path.exists(java_home):
# Get the real installation path
java_home = os.path.realpath(java_home)
if not os.path.exists(java_home):
java_home = os.getenv("JAVA_HOME")
# Look for the library file
return self.find_libjvm(java_home)
def _get_from_known_locations(self):
"""
Retrieves the first existing Java library path in the predefined known
locations
Returns:
The path to the JVM library, or None
"""
for home in self.find_possible_homes(self._locations):
jvm = self.find_libjvm(home)
if jvm is not None:
return jvm
class LinuxJVMFinder(JVMFinder):
"""
Linux JVM library finder class
"""
def __init__(self):
"""
Sets up members
"""
# Call the parent constructor
JVMFinder.__init__(self)
# Java bin file
self._java = "/usr/bin/java"
# Library file name
self._libfile = "libjvm.so"
# Predefined locations
self._locations = ("/usr/lib/jvm", "/usr/java", "/opt/sun")
# Search methods
self._methods = (self._get_from_java_home,
self._get_from_bin,
self._get_from_known_locations)
def _get_from_bin(self):
"""
Retrieves the Java library path according to the real installation of
the java executable
:return: The path to the JVM library, or None
"""
# Find the real interpreter installation path
java_bin = os.path.realpath(self._java)
if os.path.exists(java_bin):
# Get to the home directory
java_home = os.path.abspath(os.path.join(os.path.dirname(java_bin),
'..'))
# Look for the JVM library
return self.find_libjvm(java_home)
class DarwinJVMFinder(LinuxJVMFinder):
"""
Mac OS X JVM library finder class
"""
def __init__(self):
"""
Sets up members
"""
# Call the parent constructor
LinuxJVMFinder.__init__(self)
# Library file name
self._libfile = "libjli.dylib"
self._methods = list(self._methods)
self._methods.append(self._javahome_binary)
# Predefined locations
self._locations = ('/Library/Java/JavaVirtualMachines',)
def _javahome_binary(self):
"""
for osx > 10.5 we have the nice util /usr/libexec/java_home available. Invoke it and
return its output. It seems this tool has been removed in osx 10.9.
"""
import platform
import subprocess
from distutils.version import StrictVersion
current = StrictVersion(platform.mac_ver()[0][:4])
if current >= StrictVersion('10.6') and current < StrictVersion('10.9'):
return subprocess.check_output(
['/usr/libexec/java_home']).strip()
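# The helper below inspects the PE ("MZ") header of jvm.dll to compare the
# JVM's target machine with the bitness of the running Python interpreter.
# The machine constants are the usual IMAGE_FILE_MACHINE values in decimal
# (332 == 0x14c i386, 512 == 0x200 IA64, 34404 == 0x8664 AMD64); the machine
# field sits 4 bytes past the PE header offset stored at byte 60 of the file.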
def _checkJVMArch(jvmPath, maxsize=sys.maxsize):
import struct
IMAGE_FILE_MACHINE_I386 = 332
IMAGE_FILE_MACHINE_IA64 = 512
IMAGE_FILE_MACHINE_AMD64 = 34404
is64 = maxsize > 2**32
with open(jvmPath, "rb") as f:
s = f.read(2)
if s != b"MZ":
raise JVMNotSupportedException("JVM not valid")
f.seek(60)
s = f.read(4)
header_offset = struct.unpack("<L", s)[0]
f.seek(header_offset + 4)
s = f.read(2)
machine = struct.unpack("<H", s)[0]
if machine == IMAGE_FILE_MACHINE_I386:
if is64:
raise JVMNotSupportedException(
"JVM mismatch, python is 64 bit and JVM is 32 bit.")
elif machine == IMAGE_FILE_MACHINE_IA64 or machine == IMAGE_FILE_MACHINE_AMD64:
if not is64:
raise JVMNotSupportedException(
"JVM mismatch, python is 32 bit and JVM is 64 bit.")
else:
raise JVMNotSupportedException("Unable to determine JVM Type")
reg_keys = [r"SOFTWARE\JavaSoft\Java Runtime Environment",
r"SOFTWARE\JavaSoft\JRE",
]
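# The first key is where pre-9 Oracle JREs register themselves; newer Java
# releases use "SOFTWARE\JavaSoft\JRE". Each key stores a "CurrentVersion"
# value whose matching subkey exposes "RuntimeLib", the full path to jvm.dll
# that _get_from_registry() below returns.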
class WindowsJVMFinder(JVMFinder):
"""
Windows JVM library finder class
"""
def __init__(self):
"""
Sets up members
"""
# Call the parent constructor
JVMFinder.__init__(self)
# Library file name
self._libfile = "jvm.dll"
# Search methods
self._methods = (self._get_from_java_home, self._get_from_registry)
def check(self, jvm):
_checkJVMArch(jvm)
def _get_from_registry(self):
"""
Retrieves the path to the default Java installation stored in the
Windows registry
:return: The path found in the registry, or None
"""
if not winreg:
return None
for location in reg_keys:
try:
jreKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, location)
cv = winreg.QueryValueEx(jreKey, "CurrentVersion")
versionKey = winreg.OpenKey(jreKey, cv[0])
winreg.CloseKey(jreKey)
cv = winreg.QueryValueEx(versionKey, "RuntimeLib")
winreg.CloseKey(versionKey)
return cv[0]
except OSError:
pass
return None
#!/opt/datadog-agent/embedded/bin/python
'''
Datadog
www.datadoghq.com
----
Make sense of your IT Data
Licensed under Simplified BSD License (see LICENSE)
(C) Boxed Ice 2010 all rights reserved
(C) Datadog, Inc. 2010-2013 all rights reserved
'''
# set up logging before importing any other components
from config import initialize_logging # noqa
initialize_logging('forwarder')
# stdlib
from datetime import timedelta
import logging
import os
from Queue import Full, Queue
from socket import error as socket_error, gaierror
import sys
import threading
import zlib
# For pickle & PID files, see issue 293
os.umask(022)
# 3p
try:
import pycurl
except ImportError:
# For the source install, pycurl might not be installed
pycurl = None
from tornado.escape import json_decode
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
from tornado.options import define, options, parse_command_line
import tornado.web
# project
from checks.check_status import ForwarderStatus
from config import (
get_config,
get_logging_config,
get_url_endpoint,
get_version
)
import modules
from transaction import Transaction, TransactionManager
from util import (
get_hostname,
get_tornado_ioloop,
get_uuid,
json,
Watchdog,
)
from utils.logger import RedactedLogRecord
logging.LogRecord = RedactedLogRecord
log = logging.getLogger('forwarder')
log.setLevel(get_logging_config()['log_level'] or logging.INFO)
DD_ENDPOINT = "dd_url"
TRANSACTION_FLUSH_INTERVAL = 5000 # Every 5 seconds
WATCHDOG_INTERVAL_MULTIPLIER = 10 # 10x flush interval
HEADERS_TO_REMOVE = [
'Host',
'Content-Length',
]
# Maximum delay before replaying a transaction
MAX_WAIT_FOR_REPLAY = timedelta(seconds=90)
# Maximum queue size in bytes (when this is reached, old messages are dropped)
MAX_QUEUE_SIZE = 30 * 1024 * 1024 # 30MB
THROTTLING_DELAY = timedelta(microseconds=1000000/2) # 2 msg/second
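# Rough flow of this forwarder: the tornado handlers below receive payloads
# from the local collector, wrap each one in an AgentTransaction subclass, and
# hand it to the TransactionManager, which queues, throttles and replays
# flushes to the Datadog intake according to the constants above. Custom
# emitters, if configured, receive a decoded copy of each payload outside of
# that transaction path.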
class EmitterThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.__name = kwargs['name']
self.__emitter = kwargs.pop('emitter')()
self.__logger = kwargs.pop('logger')
self.__config = kwargs.pop('config')
self.__max_queue_size = kwargs.pop('max_queue_size', 100)
self.__queue = Queue(self.__max_queue_size)
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = True
def run(self):
while True:
(data, headers) = self.__queue.get()
try:
self.__logger.debug('Emitter %r handling a packet', self.__name)
self.__emitter(data, self.__logger, self.__config)
except Exception:
self.__logger.error('Failure during operation of emitter %r', self.__name, exc_info=True)
def enqueue(self, data, headers):
try:
self.__queue.put((data, headers), block=False)
except Full:
self.__logger.warn('Dropping packet for %r due to backlog', self.__name)
class EmitterManager(object):
"""Track custom emitters"""
def __init__(self, config):
self.agentConfig = config
self.emitterThreads = []
for emitter_spec in [s.strip() for s in self.agentConfig.get('custom_emitters', '').split(',')]:
if len(emitter_spec) == 0:
continue
logging.info('Setting up custom emitter %r', emitter_spec)
try:
thread = EmitterThread(
name=emitter_spec,
emitter=modules.load(emitter_spec, 'emitter'),
logger=logging,
config=config,
)
thread.start()
self.emitterThreads.append(thread)
except Exception:
logging.error('Unable to start thread for emitter: %r', emitter_spec, exc_info=True)
logging.info('Done with custom emitters')
def send(self, data, headers=None):
if not self.emitterThreads:
return # bypass decompression/decoding
if headers and headers.get('Content-Encoding') == 'deflate':
data = zlib.decompress(data)
data = json_decode(data)
for emitterThread in self.emitterThreads:
logging.info('Queueing for emitter %r', emitterThread.name)
emitterThread.enqueue(data, headers)
class AgentTransaction(Transaction):
_application = None
_trManager = None
_endpoints = []
_emitter_manager = None
_type = None
@classmethod
def set_application(cls, app):
cls._application = app
cls._emitter_manager = EmitterManager(cls._application._agentConfig)
@classmethod
def set_tr_manager(cls, manager):
cls._trManager = manager
@classmethod
def get_tr_manager(cls):
return cls._trManager
@classmethod
def set_endpoints(cls):
"""
Set Datadog endpoint if an API key exists.
"""
if not cls._application._agentConfig.get('api_key'):
log.warning(u"No API key was found. Aborting endpoint setting.")
return
cls._endpoints.append(DD_ENDPOINT)
def __init__(self, data, headers, msg_type=""):
self._data = data
self._headers = headers
self._headers['DD-Forwarder-Version'] = get_version()
self._msg_type = msg_type
# Call after data has been set (size is computed in Transaction's init)
Transaction.__init__(self)
# Emitters operate outside the regular transaction framework
if self._emitter_manager is not None:
self._emitter_manager.send(data, headers)
# Insert the transaction in the Manager
self._trManager.append(self)
log.debug("Created transaction %d" % self.get_id())
self._trManager.flush()
def __sizeof__(self):
return sys.getsizeof(self._data)
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
api_key = self._application._agentConfig.get('api_key')
if api_key:
return "{0}/intake/{1}?api_key={2}".format(endpoint_base_url, self._msg_type, api_key)
return "{0}/intake/{1}".format(endpoint_base_url, self._msg_type)
def flush(self):
for endpoint in self._endpoints:
url = self.get_url(endpoint)
log.debug(
u"Sending %s to endpoint %s at %s",
self._type, endpoint, url
)
# Getting proxy settings
proxy_settings = self._application._agentConfig.get('proxy_settings', None)
tornado_client_params = {
'url': url,
'method': 'POST',
'body': self._data,
'headers': self._headers,
'validate_cert': not self._application.skip_ssl_validation,
}
# Remove headers that were passed by the emitter. Those don't apply anymore
# This is pretty hacky though as it should be done in pycurl or curl or tornado
for h in HEADERS_TO_REMOVE:
if h in tornado_client_params['headers']:
del tornado_client_params['headers'][h]
log.debug("Removing {0} header.".format(h))
force_use_curl = False
if proxy_settings is not None:
force_use_curl = True
if pycurl is not None:
log.debug("Configuring tornado to use proxy settings: %s:****@%s:%s" % (proxy_settings['user'],
proxy_settings['host'], proxy_settings['port']))
tornado_client_params['proxy_host'] = proxy_settings['host']
tornado_client_params['proxy_port'] = proxy_settings['port']
tornado_client_params['proxy_username'] = proxy_settings['user']
tornado_client_params['proxy_password'] = proxy_settings['password']
if self._application._agentConfig.get('proxy_forbid_method_switch'):
# See http://stackoverflow.com/questions/8156073/curl-violate-rfc-2616-10-3-2-and-switch-from-post-to-get
tornado_client_params['prepare_curl_callback'] = lambda curl: curl.setopt(pycurl.POSTREDIR, pycurl.REDIR_POST_ALL)
if (not self._application.use_simple_http_client or force_use_curl) and pycurl is not None:
ssl_certificate = self._application._agentConfig.get('ssl_certificate', None)
tornado_client_params['ca_certs'] = ssl_certificate
req = tornado.httpclient.HTTPRequest(**tornado_client_params)
use_curl = force_use_curl or self._application._agentConfig.get("use_curl_http_client") and not self._application.use_simple_http_client
if use_curl:
if pycurl is None:
log.error("dd-agent is configured to use the Curl HTTP Client, but pycurl is not available on this system.")
else:
log.debug("Using CurlAsyncHTTPClient")
tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
else:
log.debug("Using SimpleHTTPClient")
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(req, callback=self.on_response)
def on_response(self, response):
if response.error:
log.error("Response: %s" % response)
self._trManager.tr_error(self)
else:
self._trManager.tr_success(self)
self._trManager.flush_next()
class MetricTransaction(AgentTransaction):
_type = "metrics"
class APIMetricTransaction(MetricTransaction):
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
config = self._application._agentConfig
api_key = config['api_key']
url = endpoint_base_url + '/api/v1/series/?api_key=' + api_key
return url
def get_data(self):
return self._data
class APIServiceCheckTransaction(AgentTransaction):
_type = "service checks"
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
config = self._application._agentConfig
api_key = config['api_key']
url = endpoint_base_url + '/api/v1/check_run/?api_key=' + api_key
return url
class StatusHandler(tornado.web.RequestHandler):
def get(self):
threshold = int(self.get_argument('threshold', -1))
m = MetricTransaction.get_tr_manager()
self.write("<table><tr><td>Id</td><td>Size</td><td>Error count</td><td>Next flush</td></tr>")
transactions = m.get_transactions()
for tr in transactions:
self.write("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" %
(tr.get_id(), tr.get_size(), tr.get_error_count(), tr.get_next_flush()))
self.write("</table>")
if threshold >= 0:
if len(transactions) > threshold:
self.set_status(503)
class AgentInputHandler(tornado.web.RequestHandler):
_MSG_TYPE = ""
def post(self):
"""Read the message and forward it to the intake"""
# read message
msg = self.request.body
headers = self.request.headers
msg_type = self._MSG_TYPE
if msg is not None:
# Setup a transaction for this message
tr = MetricTransaction(msg, headers, msg_type)
else:
raise tornado.web.HTTPError(500)
self.write("Transaction: %s" % tr.get_id())
class MetricsAgentInputHandler(AgentInputHandler):
_MSG_TYPE = "metrics"
class MetadataAgentInputHandler(AgentInputHandler):
_MSG_TYPE = "metadata"
class ApiInputHandler(tornado.web.RequestHandler):
def post(self):
"""Read the message and forward it to the intake"""
# read message
msg = self.request.body
headers = self.request.headers
if msg is not None:
# Setup a transaction for this message
APIMetricTransaction(msg, headers)
else:
raise tornado.web.HTTPError(500)
class ApiCheckRunHandler(tornado.web.RequestHandler):
"""
Handler to submit Service Checks
"""
def post(self):
# read message
msg = self.request.body
headers = self.request.headers
if msg is not None:
# Setup a transaction for this message
tr = APIServiceCheckTransaction(msg, headers)
else:
raise tornado.web.HTTPError(500)
self.write("Transaction: %s" % tr.get_id())
class Application(tornado.web.Application):
def __init__(self, port, agentConfig, watchdog=True,
skip_ssl_validation=False, use_simple_http_client=False):
self._port = int(port)
self._agentConfig = agentConfig
self._metrics = {}
AgentTransaction.set_application(self)
AgentTransaction.set_endpoints()
self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
MAX_QUEUE_SIZE, THROTTLING_DELAY)
AgentTransaction.set_tr_manager(self._tr_manager)
self._watchdog = None
self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
self.use_simple_http_client = use_simple_http_client
if self.skip_ssl_validation:
log.info("Skipping SSL hostname validation, useful when using a transparent proxy")
if watchdog:
watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
self._watchdog = Watchdog(watchdog_timeout,
max_mem_mb=agentConfig.get('limit_memory_consumption', None))
def log_request(self, handler):
""" Override the tornado logging method.
If everything goes well, log level is DEBUG.
Otherwise it's WARNING or ERROR depending on the response code. """
if handler.get_status() < 400:
log_method = log.debug
elif handler.get_status() < 500:
log_method = log.warning
else:
log_method = log.error
request_time = 1000.0 * handler.request.request_time()
log_method(
u"%d %s %.2fms",
handler.get_status(),
handler._request_summary(), request_time
)
def appendMetric(self, prefix, name, host, device, ts, value):
if prefix in self._metrics:
metrics = self._metrics[prefix]
else:
metrics = {}
self._metrics[prefix] = metrics
if name in metrics:
metrics[name].append([host, device, ts, value])
else:
metrics[name] = [[host, device, ts, value]]
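# Package everything accumulated via appendMetric into a single
# MetricTransaction (tagged with the host uuid, hostname and api key) and
# reset the in-memory buffer.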
def _postMetrics(self):
if len(self._metrics) > 0:
self._metrics['uuid'] = get_uuid()
self._metrics['internalHostname'] = get_hostname(self._agentConfig)
self._metrics['apiKey'] = self._agentConfig['api_key']
MetricTransaction(json.dumps(self._metrics),
headers={'Content-Type': 'application/json'})
self._metrics = {}
def run(self):
handlers = [
(r"/intake/?", AgentInputHandler),
(r"/intake/metrics?", MetricsAgentInputHandler),
(r"/intake/metadata?", MetadataAgentInputHandler),
(r"/api/v1/series/?", ApiInputHandler),
(r"/api/v1/check_run/?", ApiCheckRunHandler),
(r"/status/?", StatusHandler),
]
settings = dict(
cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
xsrf_cookies=False,
debug=False,
log_function=self.log_request
)
non_local_traffic = self._agentConfig.get("non_local_traffic", False)
tornado.web.Application.__init__(self, handlers, **settings)
http_server = tornado.httpserver.HTTPServer(self)
try:
# non_local_traffic must be == True to match, not just some non-false value
if non_local_traffic is True:
http_server.listen(self._port)
else:
# localhost in lieu of 127.0.0.1 to support IPv6
try:
http_server.listen(self._port, address=self._agentConfig['bind_host'])
except gaierror:
log.warning("localhost seems undefined in your host file, using 127.0.0.1 instead")
http_server.listen(self._port, address="127.0.0.1")
except socket_error as e:
if "Errno 99" in str(e):
log.warning("IPv6 doesn't seem to be fully supported. Falling back to IPv4")
http_server.listen(self._port, address="127.0.0.1")
else:
raise
except socket_error as e:
log.exception("Socket error %s. Is another application listening on the same port? Exiting", e)
sys.exit(1)
except Exception as e:
log.exception("Uncaught exception. Forwarder is exiting.")
sys.exit(1)
log.info("Listening on port %d" % self._port)
# Register callbacks
self.mloop = get_tornado_ioloop()
logging.getLogger().setLevel(get_logging_config()['log_level'] or logging.INFO)
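# Periodic job: touch the watchdog, post any metrics gathered by the
# forwarder itself (via appendMetric) and flush queued transactions on every
# TRANSACTION_FLUSH_INTERVAL tick.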
def flush_trs():
if self._watchdog:
self._watchdog.reset()
self._postMetrics()
self._tr_manager.flush()
tr_sched = tornado.ioloop.PeriodicCallback(flush_trs, TRANSACTION_FLUSH_INTERVAL,
io_loop=self.mloop)
# Register optional Graphite listener
gport = self._agentConfig.get("graphite_listen_port", None)
if gport is not None:
log.info("Starting graphite listener on port %s" % gport)
from graphite import GraphiteServer
gs = GraphiteServer(self, get_hostname(self._agentConfig), io_loop=self.mloop)
if non_local_traffic is True:
gs.listen(gport)
else:
gs.listen(gport, address="localhost")
# Start everything
if self._watchdog:
self._watchdog.reset()
tr_sched.start()
self.mloop.start()
log.info("Stopped")
def stop(self):
self.mloop.stop()
def init(skip_ssl_validation=False, use_simple_http_client=False):
agentConfig = get_config(parse_args=False)
port = agentConfig.get('listen_port', 17123)
if port is None:
port = 17123
else:
port = int(port)
app = Application(port, agentConfig, skip_ssl_validation=skip_ssl_validation, use_simple_http_client=use_simple_http_client)
def sigterm_handler(signum, frame):
log.info("caught sigterm. stopping")
app.stop()
import signal
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
return app
def main():
# Deprecation notice
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
define("sslcheck", default=1, help="Verify SSL hostname, on by default")
define("use_simple_http_client", default=0, help="Use Tornado SimpleHTTPClient instead of CurlAsyncHTTPClient")
args = parse_command_line()
skip_ssl_validation = False
use_simple_http_client = False
if unicode(options.sslcheck) == u"0":
skip_ssl_validation = True
if unicode(options.use_simple_http_client) == u"1":
use_simple_http_client = True
# If we don't have any arguments, run the server.
if not args:
app = init(skip_ssl_validation, use_simple_http_client=use_simple_http_client)
try:
app.run()
except Exception:
log.exception("Uncaught exception in the forwarder")
finally:
ForwarderStatus.remove_latest_status()
else:
usage = "%s [help|info]. Run with no commands to start the server" % (sys.argv[0])
command = args[0]
if command == 'info':
logging.getLogger().setLevel(logging.ERROR)
return ForwarderStatus.print_latest_status()
elif command == 'help':
print usage
else:
print "Unknown command: %s" % command
print usage
return -1
return 0
if __name__ == "__main__":
sys.exit(main())
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from datetime import datetime, timedelta
from azure.core.exceptions import HttpResponseError
from azure.storage.blob import (
BlobServiceClient,
BlobType,
BlobBlock,
BlobSasPermissions,
ContainerEncryptionScope,
generate_blob_sas,
generate_account_sas, ResourceTypes, AccountSasPermissions, generate_container_sas, ContainerSasPermissions
)
from settings.testcase import BlobPreparer
from devtools_testutils.storage import StorageTestCase
# ------------------------------------------------------------------------------
# The encryption scopes are pre-created with the management-plane tool ArmClient,
# so the tests can use them directly.
TEST_ENCRYPTION_KEY_SCOPE = "antjoscope1"
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE = ContainerEncryptionScope(
default_encryption_scope="containerscope")
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE = {
"default_encryption_scope": "containerscope",
"prevent_encryption_scope_override": True
}
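# With prevent_encryption_scope_override set, blobs in the container must use
# the container's default scope and cannot pass a different encryption_scope.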
TEST_SAS_ENCRYPTION_SCOPE = "testscope1"
TEST_SAS_ENCRYPTION_SCOPE_2 = "testscope2"
# ------------------------------------------------------------------------------
class StorageCPKNTest(StorageTestCase):
def _setup(self, bsc):
self.config = bsc._config
self.container_name = self.get_resource_name('utcontainer')
# prep some test data so that it can be used in upload tests
self.byte_data = self.get_random_bytes(64 * 1024)
if self.is_live:
try:
bsc.create_container(self.container_name)
except:
pass
def _teardown(self, bsc):
if self.is_live:
try:
bsc.delete_container(self.container_name)
except:
pass
return super(StorageCPKNTest, self).tearDown()
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name("cpk")
def _create_block_blob(self, bsc, blob_name=None, data=None, encryption_scope=None, max_concurrency=1, overwrite=False):
blob_name = blob_name if blob_name else self._get_blob_reference()
blob_client = bsc.get_blob_client(self.container_name, blob_name)
data = data if data else b''
resp = blob_client.upload_blob(data, encryption_scope=encryption_scope, max_concurrency=max_concurrency, overwrite=overwrite)
return blob_client, resp
def _create_append_blob(self, bsc, encryption_scope=None):
blob_name = self._get_blob_reference()
blob = bsc.get_blob_client(
self.container_name,
blob_name)
blob.create_append_blob(encryption_scope=encryption_scope)
return blob
def _create_page_blob(self, bsc, encryption_scope=None):
blob_name = self._get_blob_reference()
blob = bsc.get_blob_client(
self.container_name,
blob_name)
blob.create_page_blob(1024 * 1024, encryption_scope=encryption_scope)
return blob
# -- Test cases for APIs supporting CPK ----------------------------------------------
@pytest.mark.playback_test_only
@BlobPreparer()
def test_put_block_and_put_block_list(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc)
blob_client.stage_block('1', b'AAA', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
blob_client.stage_block('2', b'BBB', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
blob_client.stage_block('3', b'CCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2'), BlobBlock(block_id='3')]
put_block_list_resp = blob_client.commit_block_list(block_list,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAABBBCCC')
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_and_put_block_list_with_blob_sas(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_name = self._get_blob_reference()
token1 = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=storage_account_key,
permission=BlobSasPermissions(read=True, write=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE,
)
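# The SAS carries the encryption scope (the ses query parameter), so writes
# made with this token are encrypted with TEST_SAS_ENCRYPTION_SCOPE without
# passing encryption_scope on each call.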
blob_client = BlobServiceClient(self.account_url(storage_account_name, "blob"), token1)\
.get_blob_client(self.container_name, blob_name)
blob_client.stage_block('1', b'AAA')
blob_client.stage_block('2', b'BBB')
blob_client.stage_block('3', b'CCC')
# Act
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2'), BlobBlock(block_id='3')]
put_block_list_resp = blob_client.commit_block_list(block_list)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_SAS_ENCRYPTION_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAABBBCCC')
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_and_put_block_list_with_blob_sas_fails(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_name = self._get_blob_reference()
token1 = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=storage_account_key,
permission=BlobSasPermissions(read=True, write=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE,
)
blob_client = BlobServiceClient(self.account_url(storage_account_name, "blob"), token1)\
.get_blob_client(self.container_name, blob_name)
# ses in the SAS and the encryption_scope keyword both set, with DIFFERENT values: the call raises an exception
with self.assertRaises(HttpResponseError):
blob_client.stage_block('1', b'AAA', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# ses in the SAS and the encryption_scope keyword both set, with the SAME value: the call succeeds
blob_client.stage_block('1', b'AAA', encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)
# Act
block_list = [BlobBlock(block_id='1')]
# ses in the SAS and the encryption_scope keyword both set, with DIFFERENT values: the call raises an exception
with self.assertRaises(HttpResponseError):
blob_client.commit_block_list(block_list, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# ses in the SAS and the encryption_scope keyword both set, with the SAME value: the call succeeds
put_block_list_resp = blob_client.commit_block_list(block_list, encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_SAS_ENCRYPTION_SCOPE)
# generate a sas with a different encryption scope
token2 = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=storage_account_key,
permission=BlobSasPermissions(read=True, write=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
)
blob_client_diff_encryption_scope_sas = BlobServiceClient(self.account_url(storage_account_name, "blob"), token2)\
.get_blob_client(self.container_name, blob_name)
# The blob can be downloaded no matter which encryption scope the SAS carries:
# the blob was encrypted with TEST_SAS_ENCRYPTION_SCOPE while the ses in this SAS token is TEST_ENCRYPTION_KEY_SCOPE,
# yet the download still succeeds
blob = blob_client_diff_encryption_scope_sas.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAA')
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_block_blob_with_chunks(self, storage_account_name, storage_account_key):
# parallel operation
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
# Arrange
# to force the in-memory chunks to be used
self.config.use_byte_buffer = True
# Act
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = self._create_block_blob(bsc, data=self.byte_data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
max_concurrency=2)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
self.assertEqual(upload_response['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self._teardown(bsc)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_block_blob_with_sub_streams(self, storage_account_name, storage_account_key):
# a problem with the recording framework means this test can only run live
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
# Act
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = self._create_block_blob(bsc, data=self.byte_data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
max_concurrency=2)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
self.assertEqual(upload_response['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_block_blob_with_single_chunk(self, storage_account_name, storage_account_key):
# Act
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
data = b'AAABBBCCC'
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = self._create_block_blob(bsc, data=data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_put_block_from_url_and_commit_with_cpk(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
# create source blob and get source blob url
source_blob_name = self.get_resource_name("sourceblob")
self.config.use_byte_buffer = True  # make sure chunked upload is used so the request can be recorded
source_blob_client, _ = self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
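# stage_block_from_url makes the service pull the byte ranges straight from
# the source blob URL (authorized by the read SAS above); no block data goes
# through the test client.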
# create destination blob
self.config.use_byte_buffer = False
destination_blob_client, _ = self._create_block_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act part 1: make put block from url calls
destination_blob_client.stage_block_from_url(block_id=1, source_url=source_blob_url,
source_offset=0, source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
destination_blob_client.stage_block_from_url(block_id=2, source_url=source_blob_url,
source_offset=4 * 1024, source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert blocks
committed, uncommitted = destination_blob_client.get_block_list('all')
self.assertEqual(len(uncommitted), 2)
self.assertEqual(len(committed), 0)
# commit the blocks without cpk should fail
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2')]
with self.assertRaises(HttpResponseError):
destination_blob_client.commit_block_list(block_list)
# Act commit the blocks with cpk should succeed
put_block_list_resp = destination_blob_client.commit_block_list(block_list,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
# Act get the blob content
blob = destination_blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data[0: 8 * 1024])
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_append_block(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
for content in [b'AAA', b'BBB', b'CCC']:
append_blob_prop = blob_client.append_block(content, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAABBBCCC')
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_append_block_from_url(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
source_blob_name = self.get_resource_name("sourceblob")
self.config.use_byte_buffer = True # chunk upload
source_blob_client, _ = self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
self.config.use_byte_buffer = False
destination_blob_client = self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
append_blob_prop = destination_blob_client.append_block_from_url(source_blob_url,
source_offset=0,
source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
self.assertEqual(append_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = destination_blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data[0: 4 * 1024])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_append_blob_with_chunks(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
append_blob_prop = blob_client.upload_blob(self.byte_data,
blob_type=BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
self.assertEqual(append_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_update_page(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = self._create_page_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
page_blob_prop = blob_client.upload_page(self.byte_data,
offset=0,
length=len(self.byte_data),
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob(offset=0,
length=len(self.byte_data))
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_update_page_from_url(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
source_blob_name = self.get_resource_name("sourceblob")
self.config.use_byte_buffer = True  # make sure chunked upload is used so the request can be recorded
source_blob_client, _ = self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
self.config.use_byte_buffer = False
blob_client = self._create_page_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
page_blob_prop = blob_client.upload_pages_from_url(source_blob_url,
offset=0,
length=len(self.byte_data),
source_offset=0,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob(offset=0,
length=len(self.byte_data))
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_page_blob_with_chunks(self, storage_account_name, storage_account_key):
# Act
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = bsc.get_blob_client(self.container_name, self._get_blob_reference())
page_blob_prop = blob_client.upload_blob(self.byte_data,
blob_type=BlobType.PageBlob,
max_concurrency=2,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_get_set_blob_metadata(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc, data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
blob_props = blob_client.get_blob_properties()
# Assert
self.assertTrue(blob_props.server_encrypted)
self.assertEqual(blob_props['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act set blob properties
metadata = {'hello': 'world', 'number': '42', 'up': 'upval'}
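# The blob was created with an encryption scope, so a metadata write that
# does not pass the same scope is rejected by the service.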
with self.assertRaises(HttpResponseError):
blob_client.set_blob_metadata(
metadata=metadata,
)
blob_client.set_blob_metadata(metadata=metadata, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
blob_props = blob_client.get_blob_properties()
md = blob_props.metadata
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['up'], 'upval')
self.assertFalse('Up' in md)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_snapshot_blob(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc, data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act without cpk should not work
with self.assertRaises(HttpResponseError):
blob_client.create_snapshot()
# Act with cpk should work
blob_snapshot = blob_client.create_snapshot(encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(blob_snapshot)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_list_blobs(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc, blob_name="blockblob", data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
container_client = bsc.get_container_client(self.container_name)
generator = container_client.list_blobs(include="metadata")
for blob in generator:
self.assertIsNotNone(blob)
# Assert: every listed blob has encryption_scope
self.assertEqual(blob.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_list_blobs_using_container_encryption_scope_sas(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
token = generate_container_sas(
storage_account_name,
self.container_name,
storage_account_key,
permission=ContainerSasPermissions(read=True, write=True, list=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE
)
bsc_with_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
# blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE
blob_client, _ = self._create_block_blob(bsc_with_sas_credential, blob_name="blockblob", data=b'AAABBBCCC', overwrite=True)
self._create_append_blob(bsc_with_sas_credential)
# generate a token with TEST_ENCRYPTION_KEY_SCOPE
token2 = generate_container_sas(
storage_account_name,
self.container_name,
storage_account_key,
permission=ContainerSasPermissions(read=True, write=True, list=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE
)
bsc_with_diff_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=token2,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = bsc_with_diff_sas_credential.get_container_client(self.container_name)
# The ses field in the SAS token used to list blobs differs from the encryption scope used when the blob was created;
# listing blobs should still succeed
generator = container_client.list_blobs(include="metadata")
for blob in generator:
self.assertIsNotNone(blob)
# Assert: every listed blob has encryption_scope
# and the encryption scope is the same as the one on blob creation
self.assertEqual(blob.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_copy_with_account_encryption_scope_sas(self, storage_account_name, storage_account_key):
# Arrange
sas_token = generate_account_sas(
storage_account_name,
account_key=storage_account_key,
resource_types=ResourceTypes(object=True, container=True),
permission=AccountSasPermissions(read=True, write=True, delete=True, list=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE_2
)
bsc_with_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc_with_sas_credential)
# blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE_2
blob_client, _ = self._create_block_blob(bsc_with_sas_credential, blob_name="blockblob", data=b'AAABBBCCC', overwrite=True)
# generate a second account SAS with a different encryption scope
sas_token2 = generate_account_sas(
storage_account_name,
account_key=storage_account_key,
resource_types=ResourceTypes(object=True, container=True),
permission=AccountSasPermissions(read=True, write=True, delete=True, list=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE
)
bsc_with_account_key_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token2,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
copied_blob = self.get_resource_name('copiedblob')
copied_blob_client = bsc_with_account_key_credential.get_blob_client(self.container_name, copied_blob)
# TODO: to confirm with Sean/Heidi ses in SAS cannot be set for async copy.
# The test failed for async copy (without requires_sync=True)
copied_blob_client.start_copy_from_url(blob_client.url, requires_sync=True)
props = copied_blob_client.get_blob_properties()
self.assertEqual(props.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc_with_sas_credential)
@pytest.mark.live_test_only
@BlobPreparer()
def test_copy_blob_from_url_with_encryption_scope(self, storage_account_name, storage_account_key):
# Arrange
# create sas for source blob
sas_token = generate_account_sas(
storage_account_name,
account_key=storage_account_key,
resource_types=ResourceTypes(object=True, container=True),
permission=AccountSasPermissions(read=True, write=True, delete=True, list=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
bsc_with_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc_with_sas_credential)
blob_client, _ = self._create_block_blob(bsc_with_sas_credential, blob_name="blockblob", data=b'AAABBBCCC', overwrite=True)
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
copied_blob = self.get_resource_name('copiedblob')
copied_blob_client = bsc.get_blob_client(self.container_name, copied_blob)
copied_blob_client.start_copy_from_url(blob_client.url, requires_sync=True,
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)
props = copied_blob_client.get_blob_properties()
self.assertEqual(props.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc_with_sas_credential)
@pytest.mark.live_test_only
@BlobPreparer()
def test_copy_with_user_delegation_encryption_scope_sas(self, storage_account_name, storage_account_key):
# Arrange
# to get user delegation key
oauth_token_credential = self.generate_oauth_token()
service_client = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=oauth_token_credential,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
user_delegation_key = service_client.get_user_delegation_key(datetime.utcnow(),
datetime.utcnow() + timedelta(hours=1))
self._setup(service_client)
blob_name = self.get_resource_name('blob')
sas_token = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=user_delegation_key,
permission=BlobSasPermissions(read=True, write=True, create=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE
)
bsc_with_delegation_sas = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
# blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE
blob_client, _ = self._create_block_blob(bsc_with_delegation_sas, blob_name=blob_name, data=b'AAABBBCCC', overwrite=True)
props = blob_client.get_blob_properties()
self.assertEqual(props.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(service_client)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_container_with_default_cpk_n(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = bsc.create_container('cpkcontainer',
container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE)
container_props = container_client.get_container_properties()
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, False)
for container in bsc.list_containers(name_starts_with='cpkcontainer'):
self.assertEqual(
container.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container.encryption_scope.prevent_encryption_scope_override, False)
blob_client = container_client.get_blob_client("appendblob")
# provide an encryption scope when uploading the blob
resp = blob_client.upload_blob(b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Use the provided encryption scope on the blob
self.assertEqual(resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
container_client.delete_container()
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_container_with_default_cpk_n_deny_override(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = bsc.create_container(
'denyoverridecpkcontainer',
container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE
)
container_props = container_client.get_container_properties()
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, True)
for container in bsc.list_containers(name_starts_with='denyoverridecpkcontainer'):
self.assertEqual(
container.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container.encryption_scope.prevent_encryption_scope_override, True)
blob_client = container_client.get_blob_client("appendblob")
# It's not allowed to set encryption scope on the blob when the container denies encryption scope override.
with self.assertRaises(HttpResponseError):
blob_client.upload_blob(b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
resp = blob_client.upload_blob(b'aaaa', BlobType.AppendBlob)
self.assertEqual(resp['encryption_scope'], TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
container_client.delete_container()
# ------------------------------------------------------------------------------
from common_fixtures import * # NOQA
from cattle import ApiError
import yaml
@pytest.fixture(scope='module')
def image_uuid(context):
return context.image_uuid
def test_create_env_and_svc(client, image_uuid):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": image_uuid}
# create service
service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# verify that the load balancer was created for the service
lb = client. \
list_loadBalancer(serviceId=service.id)
assert len(lb) == 1
assert lb[0].state == 'active'
def test_activate_lb_svc(super_client, context, client, image_uuid):
host = context.host
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": image_uuid,
"ports": [8082, '910:1001']}
service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
# perform validation
lb, service = _validate_lb_service_activate(env, host,
service, client,
['8082:8082', '910:1001'])
_validate_lb_instance(host, lb, super_client, service)
def test_deactivate_then_activate_lb_svc(super_client, new_context):
client = new_context.client
host1, host2, lb, service, env = _activate_svc_w_scale_two(new_context,
random_str())
# 1. verify that all host mappings are created
validate_add_host(host1, lb, client)
validate_add_host(host2, lb, client)
# 2. deactivate the service and validate that
# the host mappings are still around,
# and the lb is still present
service = client.wait_success(service.deactivate())
validate_add_host(host1, lb, client)
validate_add_host(host2, lb, client)
lb = super_client.reload(lb)
assert lb.state == "inactive"
# 3. activate service again
service = client.wait_success(service.activate())
assert service.state == 'active'
lb = super_client.reload(lb)
assert lb.state == "active"
_validate_lb_instance(host1, lb, super_client, service)
_validate_lb_instance(host2, lb, super_client, service)
def test_deactivate_then_remove_lb_svc(new_context):
client = new_context.client
host1, host2, lb, service, env = _activate_svc_w_scale_two(new_context,
random_str())
# 1. verify that all host mappings are created
validate_add_host(host1, lb, client)
validate_add_host(host2, lb, client)
# 2. deactivate the service and validate that
# the host mappings are still around,
# and the lb is still present
service = client.wait_success(service.deactivate())
validate_add_host(host1, lb, client)
validate_add_host(host2, lb, client)
lb = client.reload(lb)
assert lb.state == "inactive"
# try to remove lb - should fail
with pytest.raises(ApiError) as e:
lb = client.wait_success(client.delete(lb))
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidAction'
assert e.value.error.fieldName == 'serviceId'
# remove service and verify that the lb is gone
client.wait_success(service.remove())
wait_for_condition(client, lb, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_remove_active_lb_svc(new_context):
client = new_context.client
host1, host2, lb, service, env = _activate_svc_w_scale_two(new_context,
random_str())
# 1. verify that all host mappings are created/updated
validate_add_host(host1, lb, client)
validate_add_host(host2, lb, client)
# 2. delete the service and validate that the host mappings are gone,
# along with the lb and its config/listeners
client.wait_success(service.remove())
validate_remove_host(host1, lb, client)
validate_remove_host(host2, lb, client)
wait_for_condition(
client, lb, _resource_is_removed,
lambda x: 'State is: ' + x.state)
lb_configs = client. \
list_loadBalancerConfig(name=env.name + "_" + service.name)
assert len(lb_configs) == 1
lb_config = lb_configs[0]
lb_config = client.wait_success(lb_config)
assert lb_config.state == "removed"
def test_targets(client, context):
host = context.host
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
# create web, db and lb services
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_service = client. \
create_service(name=random_str() + "web",
environmentId=env.id,
launchConfig=launch_config)
web_service = client.wait_success(web_service)
db_service = client. \
create_service(name=random_str() + "db",
environmentId=env.id,
launchConfig=launch_config)
db_service = client.wait_success(db_service)
lb_launch_config = {"imageUuid": image_uuid,
"ports": [8081, '909:1001']}
lb_service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
# map web service to lb service - early binding,
# before services are activated
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_service = lb_service.addservicelink(serviceLink=service_link)
# activate web and lb services
lb_service = client.wait_success(lb_service.activate(), 120)
_validate_lb_service_activate(env, host, lb_service, client,
['8081:8081', '909:1001'])
web_service = client.wait_success(web_service.activate(), 120)
assert web_service.state == "active"
db_service = client.wait_success(db_service.activate(), 120)
assert db_service.state == "active"
# bind db and lb services after service is activated
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_service.addservicelink(serviceLink=service_link)
# verify that instances of db and web services were added to lb
web_instances = client. \
list_container(name=env.name + "_" + web_service.name + "_" + "1")
assert len(web_instances) == 1
_validate_add_target_instance(web_instances[0], client, ports=["a.com:90"])
db_instances = client. \
list_container(name=env.name + "_" + db_service.name + "_" + "1")
assert len(db_instances) == 1
_validate_add_target_instance(db_instances[0], client, ports=["a.com:90"])
_validate_add_service_link(client, lb_service, db_service,
ports=["a.com:90"])
_validate_add_service_link(client, lb_service, web_service,
ports=["a.com:90"])
# remove link and make sure that the target map is gone
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_service.removeservicelink(serviceLink=service_link)
# validate that the instance is still running
db_instance = client.reload(db_instances[0])
assert db_instance.state == 'running'
_validate_remove_target_instance(db_instance, client)
def test_target_ips(client, context):
host = context.host
user_account_id = host.accountId
env = client.create_environment(name=random_str(),
accountId=user_account_id)
env = client.wait_success(env)
assert env.state == "active"
# create web, db and lb services
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_ips = ["72.22.16.5", '72.22.16.6']
web_service = client. \
create_externalService(name=random_str() + "web",
environmentId=env.id,
launchConfig=launch_config,
externalIpAddresses=web_ips)
web_service = client.wait_success(web_service)
db_ips = ["192.168.0.9", '192.168.0.10']
db_service = client. \
create_externalService(name=random_str() + "db",
environmentId=env.id,
launchConfig=launch_config,
externalIpAddresses=db_ips)
db_service = client.wait_success(db_service)
lb_launch_config = {"imageUuid": image_uuid,
"ports": [1010, '111:111']}
lb_service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config,
accountId=user_account_id,
loadBalancerInstanceUriPredicate='sim://')
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
# map web service to lb service - early binding,
# before services are activated
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_service = lb_service.addservicelink(serviceLink=service_link)
# activate web and lb services
lb_service = client.wait_success(lb_service.activate(), 120)
_validate_lb_service_activate(env, host, lb_service, client,
['1010:1010', '111:111'])
web_service = client.wait_success(web_service.activate(), 120)
assert web_service.state == "active"
db_service = client.wait_success(db_service.activate(), 120)
assert db_service.state == "active"
# bind db and lb services after service is activated
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_service.addservicelink(serviceLink=service_link)
# verify that ips of db and web services were added to lb
_validate_add_target_ip("72.22.16.5", client, ports=["a.com:90"])
_validate_add_target_ip("72.22.16.6", client, ports=["a.com:90"])
_validate_add_target_ip("192.168.0.9", client, ports=["a.com:90"])
_validate_add_target_ip("192.168.0.10", client, ports=["a.com:90"])
# remove link and make sure that the db targets are gone
service_link = {"serviceId": db_service.id, "ports": ["a.com:90"]}
lb_service.removeservicelink(serviceLink=service_link)
_validate_remove_target_ip("192.168.0.9", client)
_validate_remove_target_ip("192.168.0.10", client)
# remove web service and validate that the web targets are gone
client.wait_success(web_service.remove())
_validate_remove_target_ip("72.22.16.5", client)
_validate_remove_target_ip("72.22.16.6", client)
def test_create_svc_with_lb_config(context, client):
name = random_str()
env = client.create_environment(name=name)
env = client.wait_success(env)
assert env.state == "active"
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
health_check = {"responseTimeout": 3,
"interval": 4, "healthyThreshold": 5,
"unhealthyThreshold": 6, "requestLine": "index.html"}
app_policy = {"name": "policy1", "cookie": "cookie1",
"maxLength": 4, "prefix": "true",
"requestLearn": "false", "timeout": 10,
"mode": "query_string"}
lb_policy = {"name": "policy2", "cookie": "cookie1",
"domain": ".test.com", "indirect": "true",
"nocache": "true", "postonly": "true",
"mode": "insert"}
lb_config = {"name": name, "healthCheck": health_check,
"appCookieStickinessPolicy": app_policy,
"lbCookieStickinessPolicy": lb_policy}
# create service
service = client. \
create_loadBalancerService(name=name,
environmentId=env.id,
launchConfig=launch_config,
loadBalancerConfig=lb_config)
service = client.wait_success(service)
assert service.state == "inactive"
assert service.loadBalancerConfig is not None
# verify that the load balancer was created for the service
lb = client. \
list_loadBalancer(serviceId=service.id)
assert len(lb) == 1
assert lb[0].state == 'active'
# verify the load balancer config info
configs = client. \
list_loadBalancerConfig(name=name)
assert len(configs) == 1
config = configs[0]
assert config.healthCheck is not None
assert config.healthCheck.responseTimeout == 3
assert config.healthCheck.interval == 4
assert config.healthCheck.healthyThreshold == 5
assert config.healthCheck.unhealthyThreshold == 6
assert config.healthCheck.requestLine == "index.html"
assert config.appCookieStickinessPolicy is not None
assert config.appCookieStickinessPolicy.name == "policy1"
assert config.appCookieStickinessPolicy.cookie == "cookie1"
assert config.appCookieStickinessPolicy.maxLength == 4
assert config.appCookieStickinessPolicy.prefix is True
assert config.appCookieStickinessPolicy.requestLearn is False
assert config.appCookieStickinessPolicy.timeout == 10
assert config.appCookieStickinessPolicy.mode == "query_string"
assert config.lbCookieStickinessPolicy is not None
assert config.lbCookieStickinessPolicy.name == "policy2"
assert config.lbCookieStickinessPolicy.cookie == "cookie1"
assert config.lbCookieStickinessPolicy.domain == ".test.com"
assert config.lbCookieStickinessPolicy.indirect is True
assert config.lbCookieStickinessPolicy.nocache is True
assert config.lbCookieStickinessPolicy.postonly is True
assert config.lbCookieStickinessPolicy.mode == "insert"
def test_scale(new_context):
client = new_context.client
host1 = new_context.host
host2 = register_simulated_host(new_context)
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid,
"ports": [8081, '909:1001']}
service = client.create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. verify that lb got created
lbs = client. \
list_loadBalancer(serviceId=service.id)
assert len(lbs) == 1
lb = client.wait_success(lbs[0])
assert lb.state == 'active'
# validate that one host map was created
_wait_until_active_map_count(lb, 1, client)
# scale up
service = client.update(service, scale=2, name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == 2
_wait_until_active_map_count(lb, 2, client)
# now scale down
service = client.update(service, scale=0, name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == 0
_wait_until_active_map_count(lb, 0, client)
validate_remove_host(host1, lb, client)
validate_remove_host(host2, lb, client)
def test_labels(super_client, client, context):
host = context.host
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
# create service with labels, and validate all of them
# plus service label were set
service_name = random_str()
initial_labels = {'affinity': "container==B", '!affinity': "container==C"}
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"ports": [8010, '913:913'], "labels": initial_labels}
service = client. \
create_loadBalancerService(name=service_name,
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
lb, service = _validate_lb_service_activate(env, host,
service, client,
['8010:8010', '913:913'])
lb_instance = _validate_lb_instance(host, lb, super_client, service)
result_labels = {'affinity': "container==B", '!affinity': "container==C",
'io.rancher.stack_service.name':
env.name + "/" + service_name}
assert all(item in lb_instance.labels.items()
for item in result_labels.items()) is True
# create service w/o labels, and validate that
# only one service label was set
service_name = random_str()
launch_config = {"imageUuid": image_uuid,
"ports": [8089, '914:914']}
service = client. \
create_loadBalancerService(name=service_name,
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
lb, service = _validate_lb_service_activate(env, host,
service, client,
['8089:8089', '914:914'])
lb_instance = _validate_lb_instance(host, lb, super_client, service)
name = env.name + '/' + service_name
result_labels = {'io.rancher.stack_service.name': name}
assert all(item in lb_instance.labels.items()
for item in result_labels.items()) is True
def test_inactive_lb(client, context):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
# create and activate web service
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_service = client. \
create_service(name=random_str() + "web",
environmentId=env.id,
launchConfig=launch_config)
web_service = client.wait_success(web_service)
web_service = client.wait_success(web_service.activate(), 120)
assert web_service.state == "active"
web_instances = client. \
list_container(name=env.name + "_" + web_service.name + "_" + "1")
assert len(web_instances) == 1
# create lb service, but don't activate
lb_launch_config = {"imageUuid": image_uuid,
"ports": [1000]}
lb_service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=lb_launch_config)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
lbs = client. \
list_loadBalancer(serviceId=lb_service.id)
assert len(lbs) == 1
lb = lbs[0]
# map web service to lb service; validate no lb targets were created
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_service = lb_service.addservicelink(serviceLink=service_link)
target_maps = client. \
list_loadBalancerTarget(loadBalancerId=lb.id)
assert len(target_maps) == 0
# activate lb service and validate web instance was added as lb target
lb_service = client.wait_success(lb_service.activate(), 120)
assert lb_service.state == "active"
target_maps = client. \
list_loadBalancerTarget(loadBalancerId=lb.id)
assert len(target_maps) == 1
_validate_add_target_instance(web_instances[0], client, ports=["a.com:90"])
# deactivate lb service, and remove service link
lb_service = client.wait_success(lb_service.deactivate(), 120)
assert lb_service.state == "inactive"
service_link = {"serviceId": web_service.id, "ports": ["a.com:90"]}
lb_service = lb_service.removeservicelink(serviceLink=service_link)
lb_service = client.wait_success(lb_service.activate(), 120)
assert lb_service.state == "active"
_validate_remove_target_instance(web_instances[0], client)
def test_destroy_svc_instance(super_client, context, client, image_uuid):
host = context.host
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": image_uuid,
"ports": [95, '94:94']}
service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
# perform validation
lb, service = _validate_lb_service_activate(env, host,
service, client,
['94:94', '95:95'])
instance = _validate_lb_instance(host, lb, super_client, service)
client.wait_success(client.delete(instance))
_wait_until_active_map_count(lb, 0, client)
client.wait_success(service)
_wait_until_active_map_count(lb, 1, client)
_validate_lb_instance(host, lb, super_client, service)
def test_set_service_links(client, context):
env1 = client.create_environment(name=random_str())
env1 = client.wait_success(env1)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
lb_service = client.create_loadBalancerService(name="lb",
environmentId=env1.id,
launchConfig=launch_config)
lb_service = client.wait_success(lb_service)
service2 = client.create_service(name=random_str(),
environmentId=env1.id,
launchConfig=launch_config)
service2 = client.wait_success(service2)
service3 = client.create_service(name=random_str(),
environmentId=env1.id,
launchConfig=launch_config)
service3 = client.wait_success(service3)
# set service2, service3 links for lb service
service_link1 = {"serviceId": service2.id, "ports": ["a.com:90"]}
service_link2 = {"serviceId": service3.id, "ports": ["a.com:90"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link1, service_link2])
_validate_add_service_link(client, lb_service, service2,
ports=["a.com:90"])
_validate_add_service_link(client, lb_service, service3,
ports=["a.com:90"])
# update the link with new ports
service_link1 = {"serviceId": service2.id, "ports": ["a.com:100"]}
service_link2 = {"serviceId": service3.id, "ports": ["a.com:101"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link1, service_link2])
_validate_add_service_link(client, lb_service, service2,
ports=["a.com:100"])
_validate_add_service_link(client, lb_service, service3,
ports=["a.com:101"])
# remove link for service3 from the list of links
service_link = {"serviceId": service2.id, "ports": ["a.com:100"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link])
_validate_remove_service_link(client, lb_service, service3, 1)
# try to set duplicated service links
with pytest.raises(ApiError) as e:
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link, service_link])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
assert e.value.error.fieldName == 'serviceId'
# set empty service link set
lb_service = lb_service.setservicelinks(serviceLinks=[])
_validate_remove_service_link(client, lb_service, service2, 1)
def test_modify_link(client, context):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
lb_service = client.create_loadBalancerService(name="lb",
environmentId=env.id,
launchConfig=launch_config)
lb_service = client.wait_success(lb_service)
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
# set service link with hostname 1
service_link = {"serviceId": service.id, "ports": ["a.com:90"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link])
_validate_add_service_link(client, lb_service, service, ports=["a.com:90"])
# update the link with new ports
service_link = {"serviceId": service.id, "ports": ["b.com:100"]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link])
_validate_add_service_link(client, lb_service,
service, ports=["b.com:100"])
def _create_service(client, env, launch_config, name=None):
if name:
svc_name = name
else:
svc_name = random_str()
service1 = client.create_service(name=svc_name,
environmentId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
return service1
def test_create_links(client, context):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
lb_service = client.create_loadBalancerService(name="lb",
environmentId=env.id,
launchConfig=launch_config)
lb_service = client.wait_success(lb_service)
service1 = _create_service(client, env, launch_config)
service2 = _create_service(client, env, launch_config)
service3 = _create_service(client, env, launch_config)
service4 = _create_service(client, env, launch_config)
service5 = _create_service(client, env, launch_config)
service6 = _create_service(client, env, launch_config)
service7 = _create_service(client, env, launch_config)
service8 = _create_service(client, env, launch_config)
service9 = _create_service(client, env, launch_config)
service10 = _create_service(client, env, launch_config)
service11 = _create_service(client, env, launch_config)
service12 = _create_service(client, env, launch_config)
service13 = _create_service(client, env, launch_config)
service14 = _create_service(client, env, launch_config)
service15 = _create_service(client, env, launch_config)
service16 = _create_service(client, env, launch_config)
# set service link with hostname 1
port1 = "example.com:80/path=81"
port2 = "example.com"
port3 = "example.com:80"
port4 = "example.com:80/path"
port5 = "example.com:80=81"
port6 = "example.com/path"
port7 = "example.com/path=81"
port8 = "example.com=81"
port9 = "80/path"
port10 = "80/path=81"
port11 = "80=81"
port12 = "/path"
port13 = "/path=81"
port14 = "81"
port15 = "example.com/path1/path2/path3=81"
# old style
port16 = "90:a.com/hello"
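    # Port-rule formats exercised above (inferred from the sample values, not from
    # a formal spec): the new style looks like
    # "[hostname][:sourcePort][/path][=targetPort]" with every component optional,
    # while port16 keeps the old "sourcePort:hostname/path" form.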
service_link1 = {"serviceId": service1.id, "ports": [port1]}
service_link2 = {"serviceId": service2.id, "ports": [port2]}
service_link3 = {"serviceId": service3.id, "ports": [port3]}
service_link4 = {"serviceId": service4.id, "ports": [port4]}
service_link5 = {"serviceId": service5.id, "ports": [port5]}
service_link6 = {"serviceId": service6.id, "ports": [port6]}
service_link7 = {"serviceId": service7.id, "ports": [port7]}
service_link8 = {"serviceId": service8.id, "ports": [port8]}
service_link9 = {"serviceId": service9.id, "ports": [port9]}
service_link10 = {"serviceId": service10.id, "ports": [port10]}
service_link11 = {"serviceId": service11.id, "ports": [port11]}
service_link12 = {"serviceId": service12.id, "ports": [port12]}
service_link13 = {"serviceId": service13.id, "ports": [port13]}
service_link14 = {"serviceId": service14.id, "ports": [port14]}
service_link15 = {"serviceId": service15.id, "ports": [port15]}
service_link16 = {"serviceId": service16.id, "ports": [port16]}
lb_service = lb_service. \
setservicelinks(serviceLinks=[service_link1, service_link2,
service_link3, service_link4,
service_link5, service_link6,
service_link7, service_link8,
service_link9, service_link10,
service_link11, service_link12,
service_link13, service_link14,
service_link15, service_link16])
_validate_add_service_link(client, lb_service, service1, ports=[port1])
_validate_add_service_link(client, lb_service, service2, ports=[port2])
_validate_add_service_link(client, lb_service, service3, ports=[port3])
_validate_add_service_link(client, lb_service, service4, ports=[port4])
_validate_add_service_link(client, lb_service, service5, ports=[port5])
_validate_add_service_link(client, lb_service, service6, ports=[port6])
_validate_add_service_link(client, lb_service, service7, ports=[port7])
_validate_add_service_link(client, lb_service, service8, ports=[port8])
_validate_add_service_link(client, lb_service, service9, ports=[port9])
_validate_add_service_link(client, lb_service, service10, ports=[port10])
_validate_add_service_link(client, lb_service, service11, ports=[port11])
_validate_add_service_link(client, lb_service, service12, ports=[port12])
_validate_add_service_link(client, lb_service, service13, ports=[port13])
_validate_add_service_link(client, lb_service, service14, ports=[port14])
_validate_add_service_link(client, lb_service, service15, ports=[port15])
_validate_add_service_link(client, lb_service, service16, ports=[port16])
service_link1 = {"serviceId": service1.id, "ports": ["90=100=100"]}
with pytest.raises(ApiError) as e:
lb_service. \
setservicelinks(serviceLinks=[service_link1])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidPort'
service_link1 = {"serviceId": service1.id, "ports": ["a.com:b.com:80"]}
with pytest.raises(ApiError) as e:
lb_service. \
setservicelinks(serviceLinks=[service_link1])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidPort'
def test_private_lb(client, context):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"ports": [567, '568:569'],
"expose": [9999, '9998:9997']}
service = client.create_loadBalancerService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. verify that lb got created
lbs = client. \
list_loadBalancer(serviceId=service.id)
assert len(lbs) == 1
lb = client.wait_success(lbs[0])
assert lb.state == 'active'
listeners = client. \
list_loadBalancerListener(serviceId=service.id, privatePort=567)
assert len(listeners) == 1
assert listeners[0].sourcePort == 567
assert listeners[0].sourceProtocol == 'http'
assert listeners[0].privatePort == 567
assert listeners[0].targetPort == 567
listeners = client. \
list_loadBalancerListener(serviceId=service.id, privatePort=568)
assert len(listeners) == 1
assert listeners[0].sourcePort == 568
assert listeners[0].sourceProtocol == 'http'
assert listeners[0].privatePort == 568
assert listeners[0].targetPort == 569
listeners = client. \
list_loadBalancerListener(serviceId=service.id, privatePort=9999)
assert len(listeners) == 1
assert listeners[0].sourcePort is None
assert listeners[0].sourceProtocol == 'http'
assert listeners[0].privatePort == 9999
assert listeners[0].targetPort == 9999
listeners = client. \
list_loadBalancerListener(serviceId=service.id, privatePort=9998)
assert len(listeners) == 1
assert listeners[0].sourcePort is None
assert listeners[0].sourceProtocol == 'http'
assert listeners[0].privatePort == 9998
assert listeners[0].targetPort == 9997
def test_export_config(client, context):
env1 = client.create_environment(name="env1")
env1 = client.wait_success(env1)
assert env1.state == "active"
env2 = client.create_environment(name="env2")
env2 = client.wait_success(env2)
assert env2.state == "active"
# create services
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
web_service = _create_service(client, env1, launch_config, "web")
web_service1 = _create_service(client, env1, launch_config, "web1")
web_external = _create_service(client, env2, launch_config, "web2")
lb_launch_config = {"imageUuid": image_uuid,
"ports": [8081, '909:1001']}
lb_service = client. \
create_loadBalancerService(name=random_str(),
environmentId=env1.id,
launchConfig=lb_launch_config)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
# map web services
service_link = {"serviceId": web_service.id,
"ports": ["a.com:90"], "name": "test"}
service_link1 = {"serviceId": web_service1.id}
service_link_ext = {"serviceId": web_external.id, "ports": ["a.com:90"]}
lb_service = lb_service.addservicelink(serviceLink=service_link)
lb_service = lb_service.addservicelink(serviceLink=service_link1)
lb_service = lb_service.addservicelink(serviceLink=service_link_ext)
compose_config = env1.exportconfig()
assert compose_config is not None
document = yaml.load(compose_config.dockerComposeConfig)
assert len(document[lb_service.name]['links']) == 2
assert len(document[lb_service.name]['external_links']) == 1
assert len(document[lb_service.name]['labels']) == 2
labels = {"io.rancher.loadbalancer.target.web": "a.com:90",
"io.rancher.loadbalancer.target.env2/web2": "a.com:90"}
links = ["web:web", "web1:web1"]
external_links = ["env2/web2:web2"]
assert document[lb_service.name]['labels'] == labels
assert document[lb_service.name]['links'] == links
assert document[lb_service.name]['external_links'] == external_links
def _wait_until_active_map_count(lb, count, super_client, timeout=30):
start = time.time()
host_maps = super_client. \
list_loadBalancerHostMap(loadBalancerId=lb.id,
state="active")
while len(host_maps) != count:
time.sleep(.5)
host_maps = super_client. \
list_loadBalancerHostMap(loadBalancerId=lb.id, state="active")
        if time.time() - start > timeout:
            assert False, 'Timeout waiting for active host map count.'
    return
def _resource_is_active(resource):
return resource.state == 'active'
def _resource_is_removed(resource):
return resource.state == 'removed'
def validate_add_host(host, lb, client):
host_maps = client. \
list_loadBalancerHostMap(loadBalancerId=lb.id,
hostId=host.id)
assert len(host_maps) == 1
host_map = host_maps[0]
wait_for_condition(
client, host_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
assert host_map.hostId == host.id
def validate_remove_host(host, lb, super_client):
host_maps = super_client. \
list_loadBalancerHostMap(loadBalancerId=lb.id,
hostId=host.id)
assert len(host_maps) == 1
host_map = host_maps[0]
wait_for_condition(
super_client, host_map, _resource_is_removed,
lambda x: 'State is: ' + x.state)
assert host_map.hostId == host.id
def _validate_lb_instance(host, lb, super_client, service):
host_maps = super_client. \
list_loadBalancerHostMap(loadBalancerId=lb.id,
hostId=host.id, state='active')
assert len(host_maps) == 1
# verify that the agent got created
uri = 'delegate:///?lbId={}&hostMapId={}'. \
format(get_plain_id(super_client, lb),
get_plain_id(super_client, host_maps[0]))
agents = super_client.list_agent(uri=uri)
assert len(agents) == 1
# verify that the agent instance got created
agent_instances = super_client.list_instance(agentId=agents[0].id)
assert len(agent_instances) == 1
# verify that the instance was registered within the service
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id,
instanceId=agent_instances[0].id)
assert len(instance_service_map) == 1
return agent_instances[0]
def _validate_create_listener(env, service, source_port,
client, target_port):
l_name = env.name + "_" + service.name + "_" + source_port
listeners = client. \
list_loadBalancerListener(sourcePort=source_port,
name=l_name)
assert len(listeners) >= 1
listener = listeners[0]
assert listener.sourcePort == int(source_port)
assert listener.privatePort == int(source_port)
assert listener.targetPort == int(target_port)
return listener
def _validate_lb_service_activate(env, host, service, client, ports):
# 1. verify that the service was activated
assert service.state == "active"
# 2. verify that lb got created
lbs = client. \
list_loadBalancer(serviceId=service.id)
assert len(lbs) == 1
lb = client.wait_success(lbs[0])
assert lb.state == 'active'
# 3. verify host mapping got created
validate_add_host(host, lb, client)
# 4. verify that listeners are created and mapped to the config
config_id = lb.loadBalancerConfigId
source_port = ports[0].split(':')[0]
target_port = ports[0].split(':')[1]
listener = _validate_create_listener(env, service, source_port,
client, target_port)
_validate_add_listener(config_id, listener, client)
source_port = ports[1].split(':')[0]
target_port = ports[1].split(':')[1]
listener = _validate_create_listener(env, service, source_port,
client, target_port)
_validate_add_listener(config_id, listener, client)
return lb, service
def _validate_add_listener(config_id, listener, client):
lb_config_maps = _wait_until_map_created(config_id, listener, client)
config_map = lb_config_maps[0]
wait_for_condition(
client, config_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
def _wait_until_map_created(config_id, listener, client, timeout=30):
start = time.time()
l_id = listener.id
lb_config_maps = client. \
list_loadBalancerConfigListenerMap(loadBalancerListenerId=l_id,
loadBalancerConfigId=config_id)
while len(lb_config_maps) == 0:
time.sleep(.5)
lb_config_maps = client. \
list_loadBalancerConfigListenerMap(loadBalancerListenerId=l_id,
loadBalancerConfigId=config_id)
        if time.time() - start > timeout:
            assert False, 'Timeout waiting for map creation.'
    return lb_config_maps
def _wait_until_target_instance_map_created(super_client,
container, timeout=30):
start = time.time()
target_maps = super_client. \
list_loadBalancerTarget(instanceId=container.id)
while len(target_maps) == 0:
time.sleep(.5)
target_maps = super_client. \
list_loadBalancerTarget(instanceId=container.id)
        if time.time() - start > timeout:
            assert False, 'Timeout waiting for map creation.'
    return target_maps
def _validate_add_target_ip(ip, super_client, ports=None):
target_maps = _wait_until_target_ip_map_created(super_client, ip)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
if ports:
assert target_map.ports == ports
def _validate_remove_target_instance(container, super_client):
target_maps = super_client. \
list_loadBalancerTarget(instanceId=container.id)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def _validate_remove_target_ip(ip, super_client):
target_maps = super_client. \
list_loadBalancerTarget(ipAddress=ip)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def _validate_add_target_instance(container, super_client, ports=None):
target_maps = _wait_until_target_instance_map_created(super_client,
container)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
if ports:
assert target_map.ports == ports
def _wait_until_target_ip_map_created(super_client, ip, timeout=30):
start = time.time()
target_maps = super_client. \
list_loadBalancerTarget(ipAddress=ip)
while len(target_maps) == 0:
time.sleep(.5)
target_maps = super_client. \
list_loadBalancerTarget(ipAddress=ip)
        if time.time() - start > timeout:
            assert False, 'Timeout waiting for map creation.'
    return target_maps
def _activate_svc_w_scale_two(new_context, random_str):
client = new_context.client
host1 = new_context.host
host2 = register_simulated_host(new_context)
env = client.create_environment(name=random_str)
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": new_context.image_uuid,
"ports": [8081, '909:1001']}
service = client. \
create_loadBalancerService(name=random_str,
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. verify that lb got created
lbs = client.list_loadBalancer(serviceId=service.id)
assert len(lbs) == 1
lb = client.wait_success(lbs[0])
assert lb.state == 'active'
return host1, host2, lb, service, env
def _validate_add_service_link(client, service, consumedService, ports=None):
service_maps = client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id)
assert len(service_maps) == 1
    if ports:
        service_map = None
        for value in service_maps:
            if value.ports == ports:
                service_map = value
                break
        assert service_map is not None
def _validate_remove_service_link(client, service, consumedService, count,
timeout=30):
start = time.time()
service_maps = client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id,
state='removed')
while len(service_maps) != count:
time.sleep(.5)
service_maps = client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id,
state='removed')
        if time.time() - start > timeout:
            assert False, 'Timeout waiting for map to be removed.'
|
|
import json
import requests
from requests.auth import HTTPBasicAuth
from django.http import HttpResponse
from django.contrib.auth import login, logout
from django.contrib.auth.models import User, Group
from django.conf import settings
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, IsAdminUser
from rest_framework.authentication import BasicAuthentication
from rest_framework.pagination import PageNumberPagination
from rest_framework.decorators import permission_classes, authentication_classes, detail_route
from rest_framework.parsers import JSONParser
from rest_framework import viewsets, generics
from rest_framework import status
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from .permissions import *
from .models import Author, Post, Image, ForeignPost
from .remoteConnection import *
from serializers import *
import urlparse
class PostsResultsSetPagination(PageNumberPagination):
page_size_query_param = 'size'
page_size = 20
def get_paginated_response(self, data):
return Response({
"query": "posts",
"count": self.page.paginator.count,
"size": self.page_size,
'next': self.get_next_link(),
'previous': self.get_previous_link(),
'posts': data
})
class CommentResultsSetPagination(PageNumberPagination):
page_size_query_param = 'size'
page_size = 20
def get_paginated_response(self, data):
return Response({
"query": "comments",
"count": self.page.paginator.count,
"size": self.page_size,
'next': self.get_next_link(),
'previous': self.get_previous_link(),
'comments': data
})
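# Usage sketch (assumption: the paginators above are attached to the list views
# defined below). A client may override the page size with the "size" query
# parameter, e.g.
#
#     GET /author/posts?page=2&size=50
#
# and receives the envelope built in get_paginated_response:
#
#     {"query": "posts", "count": 123, "size": 20,
#      "next": "...", "previous": "...", "posts": [...]}
#
# Note that "size" in the envelope echoes the class-level page_size default, not
# necessarily the size requested by the client.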
class UserViewSet(viewsets.ModelViewSet):
"""
A viewset for viewing and editing user instances.
"""
serializer_class = UserSerializer
queryset = User.objects.all()
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
"""
GET /author/posts
Requires Auth
Response:
query (string)
size (int)
next (string)
prev (string)
posts (list of posts with comments)
"""
class AuthorStream(generics.ListAPIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated,)
queryset = Post.objects.all()
serializer_class = PostSerializer
pagination_class = PostsResultsSetPagination
def get_queryset(self):
# when declaring authentication, user can be found in request
user = self.request.user
if user.is_staff:
for node in Node.objects.all():
if node.user == user:
return Post.objects.all().exclude(visibility="SERVERONLY")
return Post.objects.all()
# could refactor to use permissions but whatevs
postsQuerySet = Post.objects.all().filter(visibility="PUBLIC")
ownQuerySet = Post.objects.all().filter(author__user=user).exclude(visibility="PUBLIC")
privateQuerySet = Post.objects.all().filter(visibility="PRIVATE").filter(author__user=user)
querySet = postsQuerySet | privateQuerySet | ownQuerySet
# get friends and foaf posts
for friend in Author.objects.get(user=user).friends.all():
friendQuerySet = Post.objects.all().filter(author=friend).filter(visibility="FRIENDS")
friendFoafQuerySet = Post.objects.all().filter(author=friend).filter(visibility="FOAF")
serverQuerySet = Post.objects.all().filter(author=friend).filter(visibility="SERVERONLY")
querySet = querySet | friendQuerySet | serverQuerySet | friendFoafQuerySet
for foaf in friend.friends.all():
foafQuerySet = Post.objects.all().filter(author=foaf).filter(visibility="FOAF")
querySet = querySet | foafQuerySet
return querySet
"""
GET /author/<authorID>/posts
Requires Auth
Response:
query (string)
size (int)
next (string)
prev (string)
posts (list of posts)
"""
class PersonalAuthorStream(generics.ListAPIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated,)
queryset = Post.objects.all()
serializer_class = PostSerializer
pagination_class = PostsResultsSetPagination
def get_queryset(self):
# when declaring authentication, user can be found in request
authorId = self.request.parser_context.get('kwargs').get('pk')
user = self.request.user
author = Author.objects.get(id=authorId)
# could refactor to use permissions but whatevs
authorPosts = Post.objects.all().filter(author=author)
#if admin show everything
if (self.request.user.is_staff and not self.request.user == author.user):
return authorPosts.exclude(visibility="PRIVATE")
publicPosts = authorPosts.all().filter(visibility="PUBLIC")
privatePosts = authorPosts.all().filter(visibility="PRIVATE").filter(author__user=user)
foafPosts = authorPosts.all().filter(visibility="FOAF").filter(author__user=user)
querySet = publicPosts | privatePosts | foafPosts
        requester = Author.objects.get(user=user)
        if requester in author.friends.all().filter(user=user) or requester == author:
friendQuerySet = authorPosts.filter(visibility="FRIENDS")
serverQuerySet = authorPosts.filter(visibility="SERVERONLY")
querySet = querySet | friendQuerySet | serverQuerySet
return querySet
class AuthorViewSet(APIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthorOrReadOnly,)
queryset = Author.objects.all()
serializer_class = AuthorSerializer
"""
POST /author/
Response:
displayname (string)
password (string)
first_name (string)
password (string)
email (string)
bio (string)
host (string)
github (string)
friends (list)
id (UUID)
"""
def get(self, request):
# queryset = Author.objects.all().filter(host="http://"+request.get_host()+"/")
queryset = Author.objects.all()
serializer = AuthorSerializer(queryset, many=True)
return Response(serializer.data)
"""
POST /author/
Request:
displayname (string)
password (string)
first_name (string)
password (string)
email (string)
bio (string)
host (string)
github (string)
friends (list)
Response:
displayname (string)
password (string)
first_name (string)
password (string)
email (string)
bio (string)
host (string)
github (string)
friends (list)
id (UUID)
"""
def post(self, request):
serializer = AuthorSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class AuthorProfileUpdateView(APIView):
serializer_class = AuthorSerializer
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthorOrReadOnly, )
    '''
    Request:
        displayname (string)
        password (string)
        first_name (string)
        email (string)
        bio (string)
        host (string)
        github (string)
        friends (list)
    Response (author object):
        displayname (string)
        password (string)
        first_name (string)
        email (string)
        bio (string)
        host (string)
        github (string)
        friends (list)
    '''
def put(self, request, pk):
authorObj = Author.objects.get(id=pk)
serializer = AuthorSerializer(authorObj, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, pk):
authorObj = Author.objects.get(id=pk)
serializer = AuthorSerializer(authorObj)
return Response(serializer.data, status=status.HTTP_200_OK)
class PostView(generics.ListCreateAPIView):
'''
A viewset for service/posts/
public posts are shown by get_queryset as default
response(post_object)
'id': UUID
'title': string
'source': URL
'origin': URL
'description': string
'content': string
'category': string
'visibility': choice selection
'content type': choice selection
'''
queryset = Post.objects.all()
serializer_class = PostSerializer
pagination_class = PostsResultsSetPagination
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated, )
def get_queryset(self):
return Post.objects.all().filter(visibility="PUBLIC")
def post(self,request):
'''
POST method for post
requires(post_object)
'id': UUID
'title': string
'source': URL
'origin': URL
'description': string
'content': string
'category': string
'visibility': choice selection
'content type': choice selection
responses are the same
'''
serializer = PostSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
'''
A viewset for service/foreignposts/
public posts from foreign nodes
response(post_object)
'id': UUID
'title': string
'source': URL
'origin': URL
'description': string
'content': string
'category': string
'visibility': choice selection
'content type': choice selection
'''
class ForeignPostView(generics.ListAPIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated, )
queryset = ForeignPost.objects.all()
serializer_class = ForeignPostSerializer
rc = RemoteConnection()
def createForeignPosts(self, request):
res = []
for node in Node.objects.all():
try:
r = self.rc.get_from_remote(node.node_url+"author/posts?size=1000/", auth = self.rc.get_node_auth(node.node_url))
# print 'testing:' + r.text
print json.loads(r.text)
except:
continue
try:
if 'posts' in json.loads(r.text):
print "WORKING ON POST ----------------"
print json.loads(r.text).get('posts')
serializer = ForeignPostSerializer(data=json.loads(r.text).get('posts'), many=True)
if serializer.is_valid():
print "SAVING POST----------------------"
serializer.save()
print "post saved"
res.extend(serializer.data)
else:
print "WORKING ON POST ---------------- FAILED"
print serializer.errors
for post in json.loads(r.text).get('posts'):
if post.get('comments'):
print "WORKING ON COMMENT ----------------"
print post.get('id')
print post.get('title')
comment_serializer = CommentSerializer(data=post.get('comments'), context={'foreign_id': post.get('id')}, many=True)
if comment_serializer.is_valid():
comment_serializer.save()
else:
print "SAVING COMMENTS FOR FOREIGN POST FAILED IN VIEWS"
print post
print serializer.errors
res.extend(serializer.errors)
except:
continue
return
def createFriendRequest(self, authorId, friends):
req = dict()
req['author'] = str(authorId)
req['authors'] = friends
req['query'] = 'friends'
return req
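    # Example payload produced by createFriendRequest (illustrative placeholders):
    #
    #     {"query": "friends",
    #      "author": "<author id>",
    #      "authors": ["<friend id>", "<friend id>", ...]}
    #
    # get() below POSTs this to a remote node's friends/<authorId>/ endpoint to
    # learn which of the local author's friends the remote author also has.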
def get(self, request, format=None):
source = "http://" + self.request.get_host() + "/"
user = self.request.user
author = Author.objects.get(user=user)
# Delete everything.
ForeignPost.objects.all().delete()
# Get and create remote public posts
self.createForeignPosts(request)
queryset = ForeignPost.objects.all()
pubqueryset = queryset.filter(visibility="PUBLIC")
# Get a list of remote friends for FOAF
friends = []
# query set of foaf but NOT friends
notfriend_foaf_queryset = ForeignPost.objects.none()
# Get friend posts
for friend in Author.objects.get(user=user).friends.all():
if not friend.host[-1] == "/":
friend.host = friend.host + "/"
friends.append(str(friend.id))
# If friend is not local
if not friend.host == source:
# Put friend in remote_friends to be later used for FOAF
friend_node = Node.objects.get(node_url = friend.host)
url = "%sfriends/%s/%s/" % (friend_node.node_url, author.id, friend.id)
# check node's server if currently friends
r = self.rc.get_from_remote(url, auth = self.rc.get_node_auth(friend_node.node_url))
response = json.loads(r.text)
# if currently friends
if json.loads(r.text).get('friends'):
friend_queryset = queryset.filter(author=friend)
friend_only_queryset = friend_queryset.filter(visibility="FRIENDS")
friend_foaf_queryset = friend_queryset.filter(visibility="FOAF")
notfriend_foaf_queryset = notfriend_foaf_queryset | queryset.filter(visibility="FOAF").exclude(author=friend)
pubqueryset = pubqueryset | friend_only_queryset | friend_foaf_queryset
# authors who are foaf
foaf = []
for post in notfriend_foaf_queryset:
# POST list of friends
authorId = post.author.id
author_host = post.author.host
if not author_host[-1] == "/":
author_host = author_host + "/"
            try:
                author_node = Node.objects.get(node_url=author_host)
            except:
                # without this skip, author_node below would be undefined (or stale
                # from a previous iteration) for posts from unregistered nodes
                print "Remote author node not found"
                continue
url = "%sfriends/%s/" % (author_node.node_url, authorId)
print url
# extend with list of local friends
print friends
data = self.createFriendRequest(authorId, friends)
# send a list of my friends to author
r = self.rc.post_to_remote(url, data, self.rc.get_node_auth(author_node.node_url))
# get list of friends that they have in common
true_friends = json.loads(r.text).get("authors")
# if any friends are the same, add post
print "FOAAAAAAAAF"
print true_friends
if len(true_friends) > 0:
pubqueryset = pubqueryset | notfriend_foaf_queryset.filter(id=post.id)
serializer = ForeignPostSerializer(pubqueryset, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def get_queryset(self):
return ForeignPost.objects.all()
class PostIDView(generics.RetrieveUpdateDestroyAPIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsPostAuthorOrReadOnly, )
queryset = Post.objects.all()
serializer_class = PostSerializer
'''
APIView for service/posts/<post_id>/
response(post_object)
'id': UUID
'title': string
'source': URL
'origin': URL
'description': string
'content': string
'category': string
'visibility': choice selection
'content type': choice selection
'''
def get(self, request, pk, format=None):
queryset = get_object_or_404(Post,id=pk)
# TODO: Refactor this gross code
if queryset.visibility == "FRIENDS":
if not queryset.author.friends.all().get(user=request.user) and not (queryset.author.user==request.user):
return Response("The post id does not exist", status=status.HTTP_400_BAD_REQUEST)
if queryset.visibility == "PRIVATE":
if not (queryset.author.user==request.user):
return Response("The post id does not exist", status=status.HTTP_400_BAD_REQUEST)
print "GETTING ID POST"
serializer = PostSerializer(queryset)
res = dict()
res["posts"] = serializer.data
res["count"] = 1
res["size"] = 10
return Response(res, status=status.HTTP_200_OK)
class ImageView(generics.ListCreateAPIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated, )
serializer_class = ImageSerializer
'''
APIView for service/images/
response(image_object)
'id': UUID
'user': author
'photo': imagefile
'origin': origin
'''
def get_queryset(self):
return Image.objects.all()
def post(self, request):
'''
POST method for images
requires(post_object)
'id': UUID
'user': author
'photo': imagefile
'''
serializer = ImageSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ImageIDView(generics.CreateAPIView):
serializer_class = ImageSerializer
def get(self, request, pk, format=None):
queryset = get_object_or_404(Image, id=pk)
serializer = ImageSerializer(queryset)
image = serializer.data['photo']
contenttype = image.split('.')[-1]
with open(settings.MEDIA_ROOT + '/' + image, "rb") as file:
return HttpResponse(file.read(), content_type="image/" + contenttype)
class PersonalImagesView(generics.ListAPIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated, )
serializer_class = ImageSerializer
def get_queryset(self):
return Image.objects.all().filter(user__user=self.request.user)
class CommentView(generics.ListCreateAPIView):
'''
List API view for comment
GET /posts/<post_id>/comments
response
id (UUID)
content (string)
author (object)
publishtime
post(UUID)
POST /posts/<post_id>/comments
request
content(string)
response
id (UUID)
content (string)
author (object)
publishtime
post(UUID)
'''
queryset = Comment.objects.all()
serializer_class = CommentSerializer
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated, )
pagination_class = CommentResultsSetPagination
rc = RemoteConnection()
def get_queryset(self):
postID = self.request.parser_context.get('kwargs').get('pk')
queryset = Comment.objects.all().filter(post=postID)
return queryset
def post(self, request, pk):
post = None
foreignpost = None
try: #look for local post
post = Post.objects.get(id=pk)
except: # else look for remote post
try:
foreignpost = ForeignPost.objects.get(id=pk)
except:
# Post doesn't exist
return Response("Post doesn't exist'", status=status.HTTP_404_NOT_FOUND)
if (foreignpost): # if post is foreign
print "HEY FOREIGN POSTS"
remote_host = self.rc.makesure_host_with_slash(foreignpost.author.host)
url = "%sposts/%s/comments/" % (remote_host, foreignpost.id)
# try:
r = self.rc.post_to_remote(url, request.data, self.rc.get_node_auth(remote_host))
print r.text
return Response(r.text, status=status.HTTP_404_NOT_FOUND)
# except:
# print r.text
# return Response("Sending comment to remote post failed", status=status.HTTP_400_BAD_REQUEST)
user = request.user
data = request.data
comment_data = request.data.pop("comment")
data_author = comment_data.pop("author")
print data
print data_author
source = "http://" + self.request.get_host() + "/"
author_host = data_author.get("host")
author_id = data_author.get("id")
post_url = request.data.pop("post")
if not author_host[-1] == "/":
author_host = author_host + "/"
# this is for local posts
# Check if user is local
print source
print author_host
print source == author_host
if (source == author_host):
# user is local
print "LOCAL COMMENT AT " + source
author = Author.objects.get(user=user)
print request.data
print comment_data
id = None
try:
print "getting id"
id = comment_data.pop('id')
except:
try:
print "nope getting uuid"
id = comment_data.pop('guid')
except:
Response("No Id found", status=status.HTTP_400_BAD_REQUEST)
comment = Comment.objects.create(id=id, author=author, post=post, **comment_data)
else: # make sure author is from a node
try:
print "GETTING NODE"
print author_host
author_node = Node.objects.get(node_url = author_host)
except:
return Response("Author not from approved node", status=status.HTTP_400_BAD_REQUEST)
try:
author = Author.objects.get(id=author_id)
print "GETTING AUTHOR"
except: # author not yet in database so we should create them
print "NOT AUTHOR"
user = User.objects.create(username = author_host[0:-1] + "__" + data_author.pop("displayName"))
try:
data_author.pop('url')
except:
pass
author = Author.objects.create(user=user, url=author_host+"author/"+author_id+"/", **data_author)
try:
id = comment_data.pop('id')
except:
try:
id = comment_data.pop('guid')
except:
Response("No Id found", status=status.HTTP_400_BAD_REQUEST)
comment = Comment.objects.create(author=author, id=id, post=post, **comment_data)
serializer = CommentSerializer(comment)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def __unicode__(self):
return "Parent post:"+ str(self.parent_post.id) + ", Author:" + self.author.displayName + ": " + self.content
class FriendsWith(APIView):
"""
GET /friends/<authorID>
Response:
authors (list): containing friend list of author
"""
def get(self, request, pk, format=None):
sf = SyncFriend()
sf.syncfriend(request)
queryset = Author.objects.get(id=pk)
serializer = FriendsWithSerializer(queryset)
res=serializer.data
res['query']='friends'
return Response(res)
"""
POST /friend/<authorID>
Request:
query (string): "friends"
author (string): id of current author
authors (list): list of authors to check friendship
Response:
query (string): "friends"
author (string): id of current author
authors (list): list of authors that are friends
"""
def post(self, request, pk, format=None):
sf = SyncFriend()
sf.syncfriend(request)
if request.data['query'] != 'friends':
return Response("Incorrect request field: 'query' field should be 'friends'.", status.HTTP_400_BAD_REQUEST)
friend_queryset = Author.objects.get(id=request.data['author']).friends.all()
request_list = request.data['authors']
match_list = []
for friend in request_list:
if friend_queryset.filter(id=friend).exists():
match_list.append(str(friend))
res = dict()
res['authors'] = match_list
res['author'] = request.data['author']
res['query'] = 'friends'
return Response(res)
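# Example exchange for FriendsWith.post (illustrative placeholders):
#
#     POST /friend/<authorID>
#     {"query": "friends", "author": "<authorID>",
#      "authors": ["<id1>", "<id2>", "<id3>"]}
#
#     -> {"query": "friends", "author": "<authorID>",
#         "authors": ["<id1>", "<id3>"]}   # the subset that are friends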
class FriendRequestView(APIView):
def get_author_info(self, request, key_name):
res=dict()
node = self.rc.sync_hostname_if_local(request.data[key_name]["host"])
author_id = request.data[key_name]["id"]
if(node == self.myNode or node == self.myNode2):
res["obj"] = Author.objects.get(id = author_id)
res["is_local"] = True
else:
if Author.objects.filter(id = author_id).exists():
res["obj"] = Author.objects.get(id = author_id)
else:
foreign_user=User(username = request.data[key_name]["host"] + "__" + request.data[key_name]["displayName"], is_active=False)
foreign_user.save()
url = node
if node[-1] != '/':
url += '/'
url += (str(author_id)+'/')
res["obj"] = Author(user=foreign_user, id=request.data[key_name]["id"], host = node, url = url)
res["obj"].save()
res["is_local"] = False
return res
# May need to use this !
'''
def check_empty_foreign_record(self, foreign_author):
if foreign_author.friends.all().count()==0 and len(foreign_author.get_request_sent())==0 and len(foreign_author.get_request_received())==0:
foreign_author.user.delete()
'''
# Handles the request creation
def post_request(self, request):
sender = self.get_author_info(request, 'friend')
receiver = self.get_author_info(request, 'author')
if (not sender["is_local"]) and (not receiver["is_local"]):
return Response("Neither author is local on this server.", status.HTTP_400_BAD_REQUEST)
if receiver["obj"].friends.all().filter(id=sender["obj"].id).exists():
return Response("Already friends.", status.HTTP_200_OK)
if sender["obj"] in receiver["obj"].get_request_received():
return Response("Friend request already sent.", status.HTTP_200_OK)
        # If the receiver has already sent the sender a request, just add the friend.
        # Add the friend locally first; if the remote does not return 200, that is their problem.
if receiver["obj"] in sender["obj"].get_request_received():
sender['obj'].friends.add(receiver['obj'])
FriendRequest.objects.filter(sender=receiver['obj'], receiver=sender['obj']).delete()
return Response("Friend added.", status.HTTP_200_OK)
        # Forward the friend request to the receiver's node when the receiver is remote.
if (sender["is_local"]) and (not receiver["is_local"]):
remote_host = self.rc.makesure_host_with_slash(receiver["obj"].host)
url = remote_host+'friendrequest/'
r = self.rc.post_to_remote(url, request.data, self.rc.get_node_auth(remote_host))
if r.status_code != 200 and r.status_code != 201:
return Response("Not getting 200 or 201 from the remote.", status.HTTP_400_BAD_REQUEST)
# -------------------------------------------------
# Otherwise get the request object created.
friend_request = FriendRequest.objects.create(sender=sender["obj"], receiver=receiver["obj"])
friend_request.save()
return Response("Friend request sent.", status.HTTP_200_OK)
def reject_request(self, request):
senderObj = Author.objects.get(id=request.data["friend"]["id"])
receiverObj = Author.objects.get(id=request.data["author"]["id"])
FriendRequest.objects.filter(sender=senderObj, receiver=receiverObj).delete()
return Response("Friend request rejected.", status.HTTP_200_OK)
def unfriend(self, request):
senderObj = Author.objects.get(id=request.data["friend"]["id"])
receiverObj = Author.objects.get(id=request.data["author"]["id"])
senderObj.friends.remove(receiverObj)
# print receiverObj.host, senderObj.host, self.myNode
'''
if receiverObj.host != self.myNode and receiverObj.host != self.myNode2:
self.check_empty_foreign_record(receiverObj)
if senderObj.host != self.myNode and senderObj.host != self.myNode2:
self.check_empty_foreign_record(senderObj)
'''
return Response("Unfriend done.", status.HTTP_200_OK)
def post(self, request):
self.rc = RemoteConnection()
        # With or without slash.
self.myNode = self.rc.sync_hostname_if_local('http://'+request.get_host()+'/')
self.myNode2 = self.rc.sync_hostname_if_local('http://'+request.get_host())
sf = SyncFriend()
sf.syncfriend(request)
if not self.rc.check_node_valid(request):
return Response("What's this node?", status.HTTP_401_UNAUTHORIZED)
if request.data['query'] == 'friendrequest':
return self.post_request(request)
if request.data['query'] == 'rejectrequest':
return self.reject_request(request)
elif request.data['query'] == 'unfriend':
return self.unfriend(request)
else:
return Response("Bad request header.", status.HTTP_400_BAD_REQUEST)
class FriendSyncView(APIView):
def get(self, request):
sf = SyncFriend()
return sf.syncfriend(request, is_from_client=True)
class FriendCheck(APIView):
"""
GET /friends/<authorID1>/<authorID2>
Response:
query (string): "friends"
authors (string): ids of checked authors
friends (bool): true iff friends
"""
rc = RemoteConnection()
def get(self, request, id1, id2, format=None):
# sf = SyncFriend()
# sf.syncfriend(request)
res = dict()
res['authors'] = [id1, id2]
res['query'] = "friends"
try:
queryset1 = Author.objects.get(id=id1)
queryset2 = Author.objects.get(id=id2)
except Author.DoesNotExist:
res['friends'] = False
return Response(res)
list1 = [str(id['id']) for id in queryset1.friends.all().values('id')]
list2 = [str(id['id']) for id in queryset2.friends.all().values('id')]
res['friends'] = (id1 in list2 and id2 in list1)
return Response(res)
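# Example response for GET /friends/<authorID1>/<authorID2>:
#
#     {"query": "friends",
#      "authors": ["<authorID1>", "<authorID2>"],
#      "friends": true}
#
# "friends" is true only when each author appears in the other's friend list.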
class Login(APIView):
authentication_classes = (BasicAuthentication, )
permission_classes = (IsAuthenticated,)
"""
POST /login
Request:
encoded login (string): base64 encoded username:password
Response:
author (object): author of corresponding user
"""
def post(self, request):
if request.user.is_authenticated() is False:
login(request, request.user)
author = Author.objects.get(user=request.user)
serializer = AuthorSerializer(author)
return Response({'author': serializer.data})
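# Rough client-side sketch for the login endpoint (assumptions: the view is routed
# at /login/ and the server runs on localhost:8000; adjust to the real URL conf):
#
#     import requests
#     r = requests.post("http://localhost:8000/login/",
#                       auth=("displayname", "password"))  # sends the Basic auth header
#     r.json()["author"]  # serialized Author of the authenticated user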
|
|
# -*- coding: utf-8 -*-
"""
Django settings for opendatahub project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from __future__ import unicode_literals
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import datetime
import sys
import os
import dj_database_url
from authentication.config import * # noqa
# SECURITY WARNING: don't run with debug turned on in production!
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
DEBUG = bool(os.environ.get('DJANGO_DEBUG', False))
TEMPLATE_DEBUG = DEBUG
PRODUCTION = os.getenv('DJANGO_CONFIG') == 'PRODUCTION'
USE_SSL = bool(os.getenv('DJANGO_SSLIFY', False))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
WEBAPP_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'webapp')
WEBAPP_DIR = os.path.join(WEBAPP_ROOT, 'dist' if PRODUCTION else 'app')
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r)gg!i^!6=62c8p416@n^x0@nc3#h)dj3ge10l*977u@np6=--'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
'warnings': {
'level': 'WARN',
'class': 'hub.exceptions.OdhLoggingHandler'
}
},
'loggers': {
'': {
'handlers': ['console', 'mail_admins', 'warnings'],
'propagate': True,
'level': 'INFO' if not DEBUG else 'DEBUG',
},
'django.db.backends': {
# otherwise prints the base64 encoded files which is simply too much for the console to handle
'level': 'WARN',
'propagate': True,
},
'Fiona': {
# default verbosity slows down everything way too much
'level': 'WARN',
'propagate': True,
},
'fastkml': {
# emits warnings if the file does not contains a geometry
'handlers': ['null'],
'level': 'ERROR',
'propagate': False
},
'hub.tests': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False
}
},
}
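# Application code is expected to pick these handlers up through the standard
# logging API, e.g. (sketch):
#
#     import logging
#     logger = logging.getLogger(__name__)  # propagates to the '' root logger above
#     logger.warning("goes to console, mail_admins and the warnings handler")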
ALLOWED_HOSTS = [host.strip() for host in
os.environ.get('DJANGO_ALLOWED_HOSTS', 'localhost,192.168.56.101').split(',')]
# correct protocol (http vs. https) when behind reverse proxy like heroku
USE_X_FORWARDED_HOST = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'rest_framework',
'hub',
'django.contrib.staticfiles',
'rest_framework_jwt',
'authentication',
'rest_framework.authtoken',
'social.apps.django_app.default',
)
# Dev. only, not required
try:
import django_extensions # noqa
INSTALLED_APPS += ('django_extensions',)
except ImportError:
pass
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'opendatahub.middleware.error.WarningMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware', # disabled - makes no sense in our API
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'opendatahub.middleware.error.ExceptionMiddleware',
)
ROOT_URLCONF = 'opendatahub.urls'
WSGI_APPLICATION = 'opendatahub.wsgi.application'
TEMPLATE_CONTEXT_PROCESSORS = (
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
'django.contrib.auth.context_processors.auth'
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
# Heroku compliance
'default': dj_database_url.config(default='postgres://opendatahub:opendatahub@localhost:5432/opendatahub')
}
DATABASES['default'].update({
'TEST_CHARSET': 'utf8',
})
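# dj_database_url.config() prefers the DATABASE_URL environment variable when set
# (as on Heroku), e.g.
#
#     DATABASE_URL=postgres://user:password@host:5432/dbname
#
# and only falls back to the local default above otherwise.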
CACHES = {
# very short-lived basically for inter-request purposes only
'L1': {
# django.core.cache.backends.locmem.LocMemCache
'BACKEND': 'opendatahub.utils.cache.locmem.LocMemNoPickleCache',
'LOCATION': 'L1',
'OPTIONS': {
'TIMEOUT': 60,
'MAX_ENTRIES': 100,
}
},
# intermediate-lived general purpose memory cache
'default': {
'BACKEND': 'opendatahub.utils.cache.locmem.LocMemNoPickleCache',
'LOCATION': 'L2',
'OPTIONS': {
'TIMEOUT': 300,
'MAX_ENTRIES': 100,
}
},
# persistent cache
'L3': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'hub_cache',
'OPTIONS': {
'TIMEOUT': None,
'MAX_ENTRIES': sys.maxint,
}
},
}
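# Sketch of how the named caches are used via the standard Django API:
#
#     from django.core.cache import cache, caches
#     caches['L1'].set('volatile-key', value, 30)  # short-lived, inter-request data
#     cache.set('key', value)                      # the 'default' (L2) cache
#     caches['L3'].set('key', value)               # persistent DatabaseCache
#
# The L3 DatabaseCache additionally requires its table ('hub_cache') to exist,
# normally created with `manage.py createcachetable`.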
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'de-ch'
TIME_ZONE = 'Europe/Zurich'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# ACCOUNT_ADAPTER = 'authentication.adapters.MessageFreeAdapter'
AUTH_USER_MODEL = 'authentication.UserProfile'
SITE_ID = 1
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=14)
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATICFILES_DIRS = (WEBAPP_DIR,)
if DEBUG:
STATICFILES_DIRS += (WEBAPP_ROOT,)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'staticfiles')
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
'social.backends.facebook.FacebookOAuth2',
# 'social.backends.github.GithubOAuth2',
'authentication.backends.OdhGithubOAuth2'
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
    'social.pipeline.social_auth.associate_by_email',  # match existing accounts by e-mail address
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'authentication.pipelines.save_profile_picture'
)
JWT_ALLOW_REFRESH = True
JWT_AUTH_HEADER_PREFIX = "Bearer"
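# Clients therefore send the token as (illustrative value):
#   Authorization: Bearer <jwt-token>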
SOCIAL_AUTH_GITHUB_EXTRA_DATA = [('login', 'login')]
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
EMAIL_HOST_PASSWORD = os.environ.get('DJANGO_EMAIL_HOST_PASSWORD')
if EMAIL_HOST_PASSWORD:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'mail.gandi.net'
EMAIL_HOST_USER = 'noreply@opendatahub.ch'
EMAIL_PORT = 465
EMAIL_USE_SSL = True
SERVER_EMAIL = 'noreply@opendatahub.ch'
DEFAULT_FROM_EMAIL = 'noreply@opendatahub.ch'
ADMINS = (('Developers', 'devs@opendatahub.ch'),)
if not USE_SSL:
SSLIFY_DISABLE = True
# ODHQL Table naming prefixes
PACKAGE_PREFIX = 'ODH'
TRANSFORMATION_PREFIX = 'TRF'
TEST_RUNNER = 'hub.tests.runner.ParameterizedTestRunner'
|
|
"""
Tests for 2D compatibility.
"""
import numpy as np
import pytest
from pandas._libs.missing import is_matching_na
import pandas as pd
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
from pandas.tests.extension.base.base import BaseExtensionTests
class Dim2CompatTests(BaseExtensionTests):
def test_transpose(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
shape = arr2d.shape
assert shape[0] != shape[-1] # otherwise the rest of the test is useless
assert arr2d.T.shape == shape[::-1]
def test_frame_from_2d_array(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
df = pd.DataFrame(arr2d)
expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]})
self.assert_frame_equal(df, expected)
def test_swapaxes(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
result = arr2d.swapaxes(0, 1)
expected = arr2d.T
self.assert_extension_array_equal(result, expected)
def test_delete_2d(self, data):
arr2d = data.repeat(3).reshape(-1, 3)
# axis = 0
result = arr2d.delete(1, axis=0)
expected = data.delete(1).repeat(3).reshape(-1, 3)
self.assert_extension_array_equal(result, expected)
# axis = 1
result = arr2d.delete(1, axis=1)
expected = data.repeat(2).reshape(-1, 2)
self.assert_extension_array_equal(result, expected)
def test_take_2d(self, data):
arr2d = data.reshape(-1, 1)
result = arr2d.take([0, 0, -1], axis=0)
expected = data.take([0, 0, -1]).reshape(-1, 1)
self.assert_extension_array_equal(result, expected)
def test_repr_2d(self, data):
# this could fail in a corner case where an element contained the name
res = repr(data.reshape(1, -1))
assert res.count(f"<{type(data).__name__}") == 1
res = repr(data.reshape(-1, 1))
assert res.count(f"<{type(data).__name__}") == 1
def test_reshape(self, data):
arr2d = data.reshape(-1, 1)
assert arr2d.shape == (data.size, 1)
assert len(arr2d) == len(data)
arr2d = data.reshape((-1, 1))
assert arr2d.shape == (data.size, 1)
assert len(arr2d) == len(data)
with pytest.raises(ValueError):
data.reshape((data.size, 2))
with pytest.raises(ValueError):
data.reshape(data.size, 2)
def test_getitem_2d(self, data):
arr2d = data.reshape(1, -1)
result = arr2d[0]
self.assert_extension_array_equal(result, data)
with pytest.raises(IndexError):
arr2d[1]
with pytest.raises(IndexError):
arr2d[-2]
result = arr2d[:]
self.assert_extension_array_equal(result, arr2d)
result = arr2d[:, :]
self.assert_extension_array_equal(result, arr2d)
result = arr2d[:, 0]
expected = data[[0]]
self.assert_extension_array_equal(result, expected)
# dimension-expanding getitem on 1D
result = data[:, np.newaxis]
self.assert_extension_array_equal(result, arr2d.T)
def test_iter_2d(self, data):
arr2d = data.reshape(1, -1)
objs = list(iter(arr2d))
assert len(objs) == arr2d.shape[0]
for obj in objs:
assert isinstance(obj, type(data))
assert obj.dtype == data.dtype
assert obj.ndim == 1
assert len(obj) == arr2d.shape[1]
def test_tolist_2d(self, data):
arr2d = data.reshape(1, -1)
result = arr2d.tolist()
expected = [data.tolist()]
assert isinstance(result, list)
assert all(isinstance(x, list) for x in result)
assert result == expected
def test_concat_2d(self, data):
left = type(data)._concat_same_type([data, data]).reshape(-1, 2)
right = left.copy()
# axis=0
result = left._concat_same_type([left, right], axis=0)
expected = data._concat_same_type([data] * 4).reshape(-1, 2)
self.assert_extension_array_equal(result, expected)
# axis=1
result = left._concat_same_type([left, right], axis=1)
assert result.shape == (len(data), 4)
self.assert_extension_array_equal(result[:, :2], left)
self.assert_extension_array_equal(result[:, 2:], right)
# axis > 1 -> invalid
msg = "axis 2 is out of bounds for array of dimension 2"
with pytest.raises(ValueError, match=msg):
left._concat_same_type([left, right], axis=2)
@pytest.mark.parametrize("method", ["backfill", "pad"])
def test_fillna_2d_method(self, data_missing, method):
arr = data_missing.repeat(2).reshape(2, 2)
assert arr[0].isna().all()
assert not arr[1].isna().any()
result = arr.fillna(method=method)
expected = data_missing.fillna(method=method).repeat(2).reshape(2, 2)
self.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis_none(self, data, method):
arr2d = data.reshape(1, -1)
err_expected = None
err_result = None
try:
expected = getattr(data, method)()
except Exception as err:
# if the 1D reduction is invalid, the 2D reduction should be as well
err_expected = err
try:
result = getattr(arr2d, method)(axis=None)
except Exception as err2:
err_result = err2
else:
result = getattr(arr2d, method)(axis=None)
if err_result is not None or err_expected is not None:
assert type(err_result) == type(err_expected)
return
assert is_matching_na(result, expected) or result == expected
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis0(self, data, method):
arr2d = data.reshape(1, -1)
kwargs = {}
if method == "std":
# pass ddof=0 so we get all-zero std instead of all-NA std
kwargs["ddof"] = 0
try:
result = getattr(arr2d, method)(axis=0, **kwargs)
except Exception as err:
try:
getattr(data, method)()
except Exception as err2:
assert type(err) == type(err2)
return
else:
raise AssertionError("Both reductions should raise or neither")
def get_reduction_result_dtype(dtype):
# windows and 32bit builds will in some cases have int32/uint32
# where other builds will have int64/uint64.
if dtype.itemsize == 8:
return dtype
elif dtype.kind in "ib":
return INT_STR_TO_DTYPE[np.dtype(int).name]
else:
# i.e. dtype.kind == "u"
return INT_STR_TO_DTYPE[np.dtype(np.uint).name]
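        # For example (platform-dependent): on most 64-bit Linux builds
        # np.dtype(int) is int64, so an int8 EA sums to an Int64 result,
        # while Windows/32-bit builds may map to int32/Int32 instead.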
if method in ["mean", "median", "sum", "prod"]:
# std and var are not dtype-preserving
expected = data
if method in ["sum", "prod"] and data.dtype.kind in "iub":
dtype = get_reduction_result_dtype(data.dtype)
expected = data.astype(dtype)
if data.dtype.kind == "b" and method in ["sum", "prod"]:
# We get IntegerArray instead of BooleanArray
pass
else:
assert type(expected) == type(data), type(expected)
assert dtype == expected.dtype
self.assert_extension_array_equal(result, expected)
elif method == "std":
self.assert_extension_array_equal(result, data - data)
# punt on method == "var"
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis1(self, data, method):
arr2d = data.reshape(1, -1)
try:
result = getattr(arr2d, method)(axis=1)
except Exception as err:
try:
getattr(data, method)()
except Exception as err2:
assert type(err) == type(err2)
return
else:
raise AssertionError("Both reductions should raise or neither")
# not necessarily type/dtype-preserving, so weaker assertions
assert result.shape == (1,)
expected_scalar = getattr(data, method)()
res = result[0]
assert is_matching_na(res, expected_scalar) or res == expected_scalar
class NDArrayBacked2DTests(Dim2CompatTests):
# More specific tests for NDArrayBackedExtensionArray subclasses
def test_copy_order(self, data):
# We should be matching numpy semantics for the "order" keyword in 'copy'
arr2d = data.repeat(2).reshape(-1, 2)
assert arr2d._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d[::2, ::2].copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy("F")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
res = arr2d.copy("K")
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.T.copy("K")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
# order not accepted by numpy
msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)"
with pytest.raises(ValueError, match=msg):
arr2d.copy("Q")
# neither contiguity
arr_nc = arr2d[::2]
assert not arr_nc._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"]
assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"]
assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"]
|
|
"""
An RDF/XML parser for RDFLib
"""
from xml.sax import make_parser
from xml.sax.handler import ErrorHandler
from xml.sax.saxutils import handler, quoteattr, escape
from urllib.parse import urljoin, urldefrag
from rdflib.namespace import RDF, is_ncname
from rdflib.term import URIRef
from rdflib.term import BNode
from rdflib.term import Literal
from rdflib.exceptions import ParserError, Error
from rdflib.parser import Parser
__all__ = ['create_parser', 'BagID', 'ElementHandler',
'RDFXMLHandler', 'RDFXMLParser']
RDFNS = RDF
# http://www.w3.org/TR/rdf-syntax-grammar/#eventterm-attribute-URI
# A mapping from unqualified terms to their qualified version.
UNQUALIFIED = {"about": RDF.about,
"ID": RDF.ID,
"type": RDF.type,
"resource": RDF.resource,
"parseType": RDF.parseType}
# http://www.w3.org/TR/rdf-syntax-grammar/#coreSyntaxTerms
CORE_SYNTAX_TERMS = [RDF.RDF, RDF.ID, RDF.about, RDF.parseType,
RDF.resource, RDF.nodeID, RDF.datatype]
# http://www.w3.org/TR/rdf-syntax-grammar/#syntaxTerms
SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
# http://www.w3.org/TR/rdf-syntax-grammar/#oldTerms
OLD_TERMS = [
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEach"),
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEachPrefix"),
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID")]
NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li, ] + OLD_TERMS
NODE_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.nodeID, RDF.about]
PROPERTY_ELEMENT_EXCEPTIONS = \
CORE_SYNTAX_TERMS + [RDF.Description, ] + OLD_TERMS
PROPERTY_ATTRIBUTE_EXCEPTIONS = \
CORE_SYNTAX_TERMS + [RDF.Description, RDF.li] + OLD_TERMS
PROPERTY_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.resource, RDF.nodeID]
XMLNS = "http://www.w3.org/XML/1998/namespace"
BASE = (XMLNS, "base")
LANG = (XMLNS, "lang")
class BagID(URIRef):
__slots__ = ['li']
def __init__(self, val):
super(URIRef, self).__init__(val)
self.li = 0
def next_li(self):
self.li += 1
return RDFNS[self.li]
class ElementHandler(object):
__slots__ = ['start', 'char', 'end', 'li', 'id',
'base', 'subject', 'predicate', 'object',
'list', 'language', 'datatype', 'declared', 'data']
def __init__(self):
self.start = None
self.char = None
self.end = None
self.li = 0
self.id = None
self.base = None
self.subject = None
self.object = None
self.list = None
self.language = None
self.datatype = None
self.declared = None
self.data = None
def next_li(self):
self.li += 1
return RDFNS[self.li]
class RDFXMLHandler(handler.ContentHandler):
def __init__(self, store):
self.store = store
self.preserve_bnode_ids = False
self.reset()
def reset(self):
document_element = ElementHandler()
document_element.start = self.document_element_start
document_element.end = lambda name, qname: None
self.stack = [None, document_element, ]
self.ids = {} # remember IDs we have already seen
self.bnode = {}
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
# ContentHandler methods
def setDocumentLocator(self, locator):
self.locator = locator
def startDocument(self):
pass
def startPrefixMapping(self, prefix, namespace):
self._ns_contexts.append(self._current_context.copy())
self._current_context[namespace] = prefix
self.store.bind(prefix, namespace or "", override=False)
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElementNS(self, name, qname, attrs):
stack = self.stack
stack.append(ElementHandler())
current = self.current
parent = self.parent
base = attrs.get(BASE, None)
if base is not None:
base, frag = urldefrag(base)
if parent and parent.base:
base = urljoin(parent.base, base)
else:
systemId = self.locator.getPublicId() \
or self.locator.getSystemId()
if systemId:
base = urljoin(systemId, base)
else:
if parent:
base = parent.base
if base is None:
systemId = self.locator.getPublicId() \
or self.locator.getSystemId()
if systemId:
base, frag = urldefrag(systemId)
current.base = base
language = attrs.get(LANG, None)
if language is None:
if parent:
language = parent.language
current.language = language
current.start(name, qname, attrs)
def endElementNS(self, name, qname):
self.current.end(name, qname)
self.stack.pop()
def characters(self, content):
char = self.current.char
if char:
char(content)
def ignorableWhitespace(self, content):
pass
def processingInstruction(self, target, data):
pass
    def add_reified(self, sid, triple):
        (s, p, o) = triple
self.store.add((sid, RDF.type, RDF.Statement))
self.store.add((sid, RDF.subject, s))
self.store.add((sid, RDF.predicate, p))
self.store.add((sid, RDF.object, o))
def error(self, message):
locator = self.locator
info = "%s:%s:%s: " % (locator.getSystemId(),
locator.getLineNumber(),
locator.getColumnNumber())
raise ParserError(info + message)
def get_current(self):
return self.stack[-2]
    # Create a read-only property called current so that self.current
    # gives the current element handler.
current = property(get_current)
def get_next(self):
return self.stack[-1]
# Create a read only property that gives the element handler to be
# used for the next element.
next = property(get_next)
def get_parent(self):
return self.stack[-3]
# Create a read only property that gives the current parent
# element handler
parent = property(get_parent)
def absolutize(self, uri):
result = urljoin(self.current.base, uri, allow_fragments=1)
if uri and uri[-1] == "#" and result[-1] != "#":
result = "%s#" % result
return URIRef(result)
def convert(self, name, qname, attrs):
if name[0] is None:
name = URIRef(name[1])
else:
name = URIRef("".join(name))
atts = {}
for (n, v) in list(attrs.items()): # attrs._attrs.iteritems(): #
if n[0] is None:
att = n[1]
else:
att = "".join(n)
if att.startswith(XMLNS) or att[0:3].lower() == "xml":
pass
elif att in UNQUALIFIED:
# if not RDFNS[att] in atts:
atts[RDFNS[att]] = v
else:
atts[URIRef(att)] = v
return name, atts
def document_element_start(self, name, qname, attrs):
if name[0] and URIRef("".join(name)) == RDF.RDF:
# Cheap hack so 2to3 doesn't turn it into __next__
next = getattr(self, 'next')
next.start = self.node_element_start
next.end = self.node_element_end
else:
self.node_element_start(name, qname, attrs)
# self.current.end = self.node_element_end
# TODO... set end to something that sets start such that
# another element will cause error
def node_element_start(self, name, qname, attrs):
name, atts = self.convert(name, qname, attrs)
current = self.current
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
next = getattr(self, 'next')
next.start = self.property_element_start
next.end = self.property_element_end
if name in NODE_ELEMENT_EXCEPTIONS:
self.error("Invalid node element URI: %s" % name)
if RDF.ID in atts:
if RDF.about in atts or RDF.nodeID in atts:
self.error(
"Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
)
id = atts[RDF.ID]
if not is_ncname(id):
self.error("rdf:ID value is not a valid NCName: %s" % id)
subject = absolutize("#%s" % id)
if subject in self.ids:
self.error(
"two elements cannot use the same ID: '%s'" % subject)
self.ids[subject] = 1 # IDs can only appear once within a document
elif RDF.nodeID in atts:
if RDF.ID in atts or RDF.about in atts:
self.error(
"Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
)
nodeID = atts[RDF.nodeID]
if not is_ncname(nodeID):
self.error(
"rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
subject = self.bnode[nodeID]
else:
subject = BNode()
self.bnode[nodeID] = subject
else:
subject = BNode(nodeID)
elif RDF.about in atts:
if RDF.ID in atts or RDF.nodeID in atts:
self.error(
"Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
)
subject = absolutize(atts[RDF.about])
else:
subject = BNode()
if name != RDF.Description: # S1
self.store.add((subject, RDF.type, absolutize(name)))
language = current.language
for att in atts:
if not att.startswith(str(RDFNS)):
predicate = absolutize(att)
try:
object = Literal(atts[att], language)
except Error as e:
self.error(e.msg)
elif att == RDF.type: # S2
predicate = RDF.type
object = absolutize(atts[RDF.type])
elif att in NODE_ELEMENT_ATTRIBUTES:
continue
elif att in PROPERTY_ATTRIBUTE_EXCEPTIONS: # S3
self.error("Invalid property attribute URI: %s" % att)
continue # for when error does not throw an exception
else:
predicate = absolutize(att)
try:
object = Literal(atts[att], language)
except Error as e:
self.error(e.msg)
self.store.add((subject, predicate, object))
current.subject = subject
def node_element_end(self, name, qname):
        # repeat node-elements are only allowed
        # at the top level
        if self.parent.object and self.current != self.stack[2]:
            self.error("Repeat node-elements inside property"
                       " elements: %s" % "".join(name))
self.parent.object = self.current.subject
def property_element_start(self, name, qname, attrs):
name, atts = self.convert(name, qname, attrs)
current = self.current
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
next = getattr(self, 'next')
object = None
current.data = None
current.list = None
if not name.startswith(str(RDFNS)):
current.predicate = absolutize(name)
elif name == RDF.li:
current.predicate = current.next_li()
elif name in PROPERTY_ELEMENT_EXCEPTIONS:
self.error("Invalid property element URI: %s" % name)
else:
current.predicate = absolutize(name)
id = atts.get(RDF.ID, None)
if id is not None:
if not is_ncname(id):
self.error("rdf:ID value is not a value NCName: %s" % id)
current.id = absolutize("#%s" % id)
else:
current.id = None
resource = atts.get(RDF.resource, None)
nodeID = atts.get(RDF.nodeID, None)
parse_type = atts.get(RDF.parseType, None)
if resource is not None and nodeID is not None:
self.error(
"Property element cannot have both rdf:nodeID and rdf:resource"
)
if resource is not None:
object = absolutize(resource)
next.start = self.node_element_start
next.end = self.node_element_end
elif nodeID is not None:
if not is_ncname(nodeID):
self.error(
"rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
object = self.bnode[nodeID]
else:
subject = BNode()
self.bnode[nodeID] = subject
object = subject
else:
object = subject = BNode(nodeID)
next.start = self.node_element_start
next.end = self.node_element_end
else:
if parse_type is not None:
for att in atts:
if att != RDF.parseType and att != RDF.ID:
self.error("Property attr '%s' now allowed here" % att)
if parse_type == "Resource":
current.subject = object = BNode()
current.char = self.property_element_char
next.start = self.property_element_start
next.end = self.property_element_end
elif parse_type == "Collection":
current.char = None
object = current.list = RDF.nil # BNode()
# self.parent.subject
next.start = self.node_element_start
next.end = self.list_node_element_end
else: # if parse_type=="Literal":
# All other values are treated as Literal
# See: http://www.w3.org/TR/rdf-syntax-grammar/
# parseTypeOtherPropertyElt
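                    # Illustrative (hypothetical) input:
                    #   <ex:prop rdf:parseType="Literal"><b>x</b></ex:prop>
                    # conceptually yields an rdf:XMLLiteral whose lexical form
                    # is the serialised child markup, accumulated by the
                    # literal_element_* handlers below, roughly:
                    #   Literal('<b>x</b>', datatype=RDF.XMLLiteral)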
object = Literal("", datatype=RDF.XMLLiteral)
current.char = self.literal_element_char
current.declared = {XMLNS: 'xml'}
next.start = self.literal_element_start
next.char = self.literal_element_char
next.end = self.literal_element_end
current.object = object
return
else:
object = None
current.char = self.property_element_char
next.start = self.node_element_start
next.end = self.node_element_end
datatype = current.datatype = atts.get(RDF.datatype, None)
language = current.language
if datatype is not None:
# TODO: check that there are no atts other than datatype and id
datatype = absolutize(datatype)
else:
for att in atts:
if not att.startswith(str(RDFNS)):
predicate = absolutize(att)
elif att in PROPERTY_ELEMENT_ATTRIBUTES:
continue
elif att in PROPERTY_ATTRIBUTE_EXCEPTIONS:
self.error("""Invalid property attribute URI: %s""" % att)
else:
predicate = absolutize(att)
if att == RDF.type:
o = URIRef(atts[att])
else:
if datatype is not None:
language = None
o = Literal(atts[att], language, datatype)
if object is None:
object = BNode()
self.store.add((object, predicate, o))
if object is None:
current.data = ""
current.object = None
else:
current.data = None
current.object = object
def property_element_char(self, data):
current = self.current
if current.data is not None:
current.data += data
def property_element_end(self, name, qname):
current = self.current
if current.data is not None and current.object is None:
literalLang = current.language
if current.datatype is not None:
literalLang = None
current.object = Literal(
current.data, literalLang, current.datatype)
current.data = None
if self.next.end == self.list_node_element_end:
if current.object != RDF.nil:
self.store.add((current.list, RDF.rest, RDF.nil))
if current.object is not None:
self.store.add(
(self.parent.subject, current.predicate, current.object))
if current.id is not None:
self.add_reified(current.id, (self.parent.subject,
current.predicate, current.object))
current.subject = None
def list_node_element_end(self, name, qname):
current = self.current
if self.parent.list == RDF.nil:
list = BNode()
# Removed between 20030123 and 20030905
# self.store.add((list, RDF.type, LIST))
self.parent.list = list
self.store.add((self.parent.list, RDF.first, current.subject))
self.parent.object = list
self.parent.char = None
else:
list = BNode()
# Removed between 20030123 and 20030905
# self.store.add((list, RDF.type, LIST))
self.store.add((self.parent.list, RDF.rest, list))
self.store.add((list, RDF.first, current.subject))
self.parent.list = list
def literal_element_start(self, name, qname, attrs):
current = self.current
self.next.start = self.literal_element_start
self.next.char = self.literal_element_char
self.next.end = self.literal_element_end
current.declared = self.parent.declared.copy()
if name[0]:
prefix = self._current_context[name[0]]
if prefix:
current.object = "<%s:%s" % (prefix, name[1])
else:
current.object = "<%s" % name[1]
if not name[0] in current.declared:
current.declared[name[0]] = prefix
if prefix:
current.object += (' xmlns:%s="%s"' % (prefix, name[0]))
else:
current.object += (' xmlns="%s"' % name[0])
else:
current.object = "<%s" % name[1]
for (name, value) in list(attrs.items()):
if name[0]:
if not name[0] in current.declared:
current.declared[name[0]] = self._current_context[name[0]]
name = current.declared[name[0]] + ":" + name[1]
else:
name = name[1]
current.object += (' %s=%s' % (name, quoteattr(value)))
current.object += ">"
def literal_element_char(self, data):
self.current.object += escape(data)
def literal_element_end(self, name, qname):
if name[0]:
prefix = self._current_context[name[0]]
if prefix:
end = "</%s:%s>" % (prefix, name[1])
else:
end = "</%s>" % name[1]
else:
end = "</%s>" % name[1]
self.parent.object += self.current.object + end
def create_parser(target, store):
parser = make_parser()
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
parser.start_namespace_decl(
"xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
rdfxml = RDFXMLHandler(store)
rdfxml.setDocumentLocator(target)
# rdfxml.setDocumentLocator(_Locator(self.url, self.parser))
parser.setContentHandler(rdfxml)
parser.setErrorHandler(ErrorHandler())
return parser
class RDFXMLParser(Parser):
def __init__(self):
pass
def parse(self, source, sink, **args):
self._parser = create_parser(source, sink)
content_handler = self._parser.getContentHandler()
preserve_bnode_ids = args.get("preserve_bnode_ids", None)
if preserve_bnode_ids is not None:
content_handler.preserve_bnode_ids = preserve_bnode_ids
# # We're only using it once now
# content_handler.reset()
# self._parser.reset()
self._parser.parse(source)
|
|
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp iSCSI storage systems.
This driver requires NetApp Clustered Data ONTAP or 7-mode
storage systems with installed iSCSI licenses.
"""
import copy
import math
import sys
import time
import uuid
import six
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp.options import netapp_7mode_opts
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume.drivers.netapp.utils import round_down
from cinder.volume.drivers.netapp.utils import set_safe_attr
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class NetAppLun(object):
"""Represents a LUN on NetApp storage."""
def __init__(self, handle, name, size, metadata_dict):
self.handle = handle
self.name = name
self.size = size
self.metadata = metadata_dict or {}
def get_metadata_property(self, prop):
"""Get the metadata property of a LUN."""
if prop in self.metadata:
return self.metadata[prop]
name = self.name
msg = _("No metadata property %(prop)s defined for the"
" LUN %(name)s")
msg_fmt = {'prop': prop, 'name': name}
LOG.debug(msg % msg_fmt)
def __str__(self, *args, **kwargs):
return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
% (self.handle, self.name, self.size, self.metadata)
class NetAppDirectISCSIDriver(driver.ISCSIDriver):
"""NetApp Direct iSCSI volume driver."""
VERSION = "1.0.0"
IGROUP_PREFIX = 'openstack-'
required_flags = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
def __init__(self, *args, **kwargs):
super(NetAppDirectISCSIDriver, self).__init__(*args, **kwargs)
validate_instantiation(**kwargs)
self.configuration.append_config_values(netapp_connection_opts)
self.configuration.append_config_values(netapp_basicauth_opts)
self.configuration.append_config_values(netapp_transport_opts)
self.configuration.append_config_values(netapp_provisioning_opts)
self.lun_table = {}
def _create_client(self, **kwargs):
"""Instantiate a client for NetApp server.
This method creates NetApp server client for api communication.
"""
host_filer = kwargs['hostname']
LOG.debug('Using NetApp filer: %s' % host_filer)
self.client = NaServer(host=host_filer,
server_type=NaServer.SERVER_TYPE_FILER,
transport_type=kwargs['transport_type'],
style=NaServer.STYLE_LOGIN_PASSWORD,
username=kwargs['login'],
password=kwargs['password'])
if kwargs['port'] is not None:
self.client.set_port(kwargs['port'])
def _do_custom_setup(self):
"""Does custom setup depending on the type of filer."""
raise NotImplementedError()
def _check_flags(self):
"""Ensure that the flags we care about are set."""
required_flags = self.required_flags
for flag in required_flags:
if not getattr(self.configuration, flag, None):
msg = _('%s is not set') % flag
raise exception.InvalidInput(reason=msg)
def do_setup(self, context):
"""Setup the NetApp Volume driver.
Called one time by the manager after the driver is loaded.
Validate the flags we care about and setup NetApp
client.
"""
self._check_flags()
self._create_client(
transport_type=self.configuration.netapp_transport_type,
login=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port)
self._do_custom_setup()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate.
Discovers the LUNs on the NetApp server.
"""
self.lun_table = {}
self._get_lun_list()
LOG.debug("Success getting LUN list from server")
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata') or dict()
return metadata.get('Volume', None)
def create_volume(self, volume):
"""Driver entry point for creating a new volume (aka ONTAP LUN)."""
LOG.debug('create_volume on %s' % volume['host'])
# get ONTAP volume name as pool name
ontap_volume_name = volume_utils.extract_host(volume['host'],
level='pool')
if ontap_volume_name is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
lun_name = volume['name']
# start with default size, get requested size
default_size = units.Mi * 100 # 100 MB
size = default_size if not int(volume['size'])\
else int(volume['size']) * units.Gi
metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
extra_specs = get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
# warn on obsolete extra specs
na_utils.log_extra_spec_warnings(extra_specs)
self.create_lun(ontap_volume_name, lun_name, size,
metadata, qos_policy_group)
LOG.debug('Created LUN with name %s' % lun_name)
metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
metadata['Volume'] = ontap_volume_name
metadata['Qtree'] = None
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
if not metadata:
msg = _("No entry in LUN table for volume/snapshot %(name)s.")
msg_fmt = {'name': name}
LOG.warn(msg % msg_fmt)
return
self._destroy_lun(metadata['Path'])
self.lun_table.pop(name)
def _destroy_lun(self, path, force=True):
"""Destroys the lun at the path."""
lun_destroy = NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.client.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s" % seg[-1])
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume.
Since exporting is idempotent in this driver, we have nothing
to do for unexporting.
"""
pass
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance.
Do the LUN masking on the storage system so the initiator can access
the LUN on the target. Also return the iSCSI properties so the
initiator can find the LUN. This implementation does not call
        _get_iscsi_properties() to get the properties because we cannot store the
LUN number in the database. We only find out what the LUN number will
be during this method call so we construct the properties dictionary
ourselves.
"""
initiator_name = connector['initiator']
name = volume['name']
lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
iqn = self._get_iscsi_service_details()
target_details_list = self._get_target_details()
msg = _("Successfully fetched target details for LUN %(name)s and "
"initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
if not target_details_list:
msg = _('No iscsi target details were found for LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
target_details = None
for tgt_detail in target_details_list:
if tgt_detail.get('interface-enabled', 'true') == 'true':
target_details = tgt_detail
break
if not target_details:
target_details = target_details_list[0]
        if not (target_details['address'] and target_details['port']):
msg = _('Failed to get target portal for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
if not iqn:
msg = _('Failed to get target IQN for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
properties = {}
properties['target_discovered'] = False
(address, port) = (target_details['address'], target_details['port'])
properties['target_portal'] = '%s:%s' % (address, port)
properties['target_iqn'] = iqn
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']
auth = volume['provider_auth']
if auth:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
return {
'driver_volume_type': 'iscsi',
'data': properties,
}
def create_snapshot(self, snapshot):
"""Driver entry point for creating a snapshot.
This driver implements snapshots by using efficient single-file
(LUN) cloning.
"""
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self._get_lun_from_table(vol_name)
self._clone_lun(lun.name, snapshot_name, 'false')
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Driver entry point for creating a new volume from a snapshot.
Many would call this "cloning" and in fact we use cloning to implement
this feature.
"""
vol_size = volume['size']
snap_size = snapshot['volume_size']
snapshot_name = snapshot['name']
new_name = volume['name']
self._clone_lun(snapshot_name, new_name, 'true')
if vol_size != snap_size:
try:
self.extend_volume(volume, volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_("Resizing %s failed. Cleaning volume."), new_name)
self.delete_volume(volume)
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
Unmask the LUN on the storage system so the given initiator can no
longer access it.
"""
initiator_name = connector['initiator']
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, initiator_name)
msg = _("Unmapped LUN %(name)s from the initiator "
"%(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
def _get_ontapi_version(self):
"""Gets the supported ontapi version."""
ontapi_version = NaElement('system-get-ontapi-version')
res = self.client.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return (major, minor)
def create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group=None):
"""Issues API request for creating LUN on volume."""
path = '/vol/%s/%s' % (volume_name, lun_name)
lun_create = NaElement.create_node_with_children(
'lun-create-by-size',
**{'path': path, 'size': six.text_type(size),
'ostype': metadata['OsType'],
'space-reservation-enabled': metadata['SpaceReserved']})
if qos_policy_group:
lun_create.add_new_child('qos-policy-group', qos_policy_group)
try:
self.client.invoke_successfully(lun_create, True)
except NaApiError as ex:
with excutils.save_and_reraise_exception():
msg = _("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s")
msg_args = {'lun_name': lun_name,
'volume_name': volume_name,
'ex': six.text_type(ex)}
LOG.error(msg % msg_args)
def _get_iscsi_service_details(self):
"""Returns iscsi iqn."""
raise NotImplementedError()
def _get_target_details(self):
"""Gets the target portal details."""
raise NotImplementedError()
def _create_lun_handle(self, metadata):
"""Returns lun handle based on filer type."""
raise NotImplementedError()
def _get_lun_list(self):
"""Gets the list of luns on filer."""
raise NotImplementedError()
def _extract_and_populate_luns(self, api_luns):
"""Extracts the luns from api.
Populates in the lun table.
"""
for lun in api_luns:
meta_dict = self._create_lun_meta(lun)
path = lun.get_child_content('path')
(rest, splitter, name) = path.rpartition('/')
handle = self._create_lun_handle(meta_dict)
size = lun.get_child_content('size')
discovered_lun = NetAppLun(handle, name,
size, meta_dict)
self._add_lun_to_table(discovered_lun)
def _is_naelement(self, elem):
"""Checks if element is NetApp element."""
if not isinstance(elem, NaElement):
raise ValueError('Expects NaElement')
def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None):
"""Maps lun to the initiator and returns lun id assigned."""
metadata = self._get_lun_attr(name, 'metadata')
os = metadata['OsType']
path = metadata['Path']
if self._check_allowed_os(os):
os = os
else:
os = 'default'
igroup_name = self._get_or_create_igroup(initiator,
initiator_type, os)
lun_map = NaElement.create_node_with_children(
'lun-map', **{'path': path,
'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
result = self.client.invoke_successfully(lun_map, True)
return result.get_child_content('lun-id-assigned')
except NaApiError as e:
code = e.code
message = e.message
            msg = _('Error mapping LUN. Code: %(code)s, Message: %(message)s')
msg_fmt = {'code': code, 'message': message}
exc_info = sys.exc_info()
LOG.warn(msg % msg_fmt)
(igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
if lun_id is not None:
return lun_id
else:
                six.reraise(exc_info[0], exc_info[1], exc_info[2])
def _unmap_lun(self, path, initiator):
"""Unmaps a lun from given initiator."""
(igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
lun_unmap = NaElement.create_node_with_children(
'lun-unmap',
**{'path': path, 'initiator-group': igroup_name})
try:
self.client.invoke_successfully(lun_unmap, True)
except NaApiError as e:
msg = _("Error unmapping lun. Code :%(code)s,"
" Message:%(message)s")
msg_fmt = {'code': e.code, 'message': e.message}
exc_info = sys.exc_info()
LOG.warn(msg % msg_fmt)
# if the lun is already unmapped
if e.code == '13115' or e.code == '9016':
pass
else:
                six.reraise(exc_info[0], exc_info[1], exc_info[2])
def _find_mapped_lun_igroup(self, path, initiator, os=None):
"""Find the igroup for mapped lun with initiator."""
raise NotImplementedError()
def _get_or_create_igroup(self, initiator, initiator_type='iscsi',
os='default'):
"""Checks for an igroup for an initiator.
Creates igroup if not found.
"""
igroups = self._get_igroup_by_initiator(initiator=initiator)
igroup_name = None
for igroup in igroups:
if igroup['initiator-group-os-type'] == os:
if igroup['initiator-group-type'] == initiator_type or \
igroup['initiator-group-type'] == 'mixed':
if igroup['initiator-group-name'].startswith(
self.IGROUP_PREFIX):
igroup_name = igroup['initiator-group-name']
break
if not igroup_name:
igroup_name = self.IGROUP_PREFIX + str(uuid.uuid4())
self._create_igroup(igroup_name, initiator_type, os)
self._add_igroup_initiator(igroup_name, initiator)
return igroup_name
def _get_igroup_by_initiator(self, initiator):
"""Get igroups by initiator."""
raise NotImplementedError()
def _check_allowed_os(self, os):
"""Checks if the os type supplied is NetApp supported."""
if os in ['linux', 'aix', 'hpux', 'windows', 'solaris',
'netware', 'vmware', 'openvms', 'xen', 'hyper_v']:
return True
else:
return False
def _create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
"""Creates igroup with specified args."""
igroup_create = NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
self.client.invoke_successfully(igroup_create, True)
def _add_igroup_initiator(self, igroup, initiator):
"""Adds initiators to the specified igroup."""
igroup_add = NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.client.invoke_successfully(igroup_add, True)
def _add_lun_to_table(self, lun):
"""Adds LUN to cache table."""
if not isinstance(lun, NetAppLun):
msg = _("Object is not a NetApp LUN.")
raise exception.VolumeBackendAPIException(data=msg)
self.lun_table[lun.name] = lun
def _get_lun_from_table(self, name):
"""Gets LUN from cache table.
Refreshes cache if lun not found in cache.
"""
lun = self.lun_table.get(name)
if lun is None:
self._get_lun_list()
lun = self.lun_table.get(name)
if lun is None:
raise exception.VolumeNotFound(volume_id=name)
return lun
def _clone_lun(self, name, new_name, space_reserved='true',
src_block=0, dest_block=0, block_count=0):
"""Clone LUN with the given name to the new name."""
raise NotImplementedError()
def _get_lun_by_args(self, **args):
"""Retrieves luns with specified args."""
raise NotImplementedError()
def _get_lun_attr(self, name, attr):
"""Get the lun attribute if found else None."""
try:
attr = getattr(self._get_lun_from_table(name), attr)
return attr
except exception.VolumeNotFound as e:
LOG.error(_("Message: %s"), e.msg)
except Exception as e:
LOG.error(_("Error getting lun attribute. Exception: %s"),
e.__str__())
return None
def _create_lun_meta(self, lun):
raise NotImplementedError()
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_size = volume['size']
src_vol = self._get_lun_from_table(src_vref['name'])
src_vol_size = src_vref['size']
new_name = volume['name']
self._clone_lun(src_vol.name, new_name, 'true')
if vol_size != src_vol_size:
try:
self.extend_volume(volume, volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_("Resizing %s failed. Cleaning volume."), new_name)
self.delete_volume(volume)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
raise NotImplementedError()
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
name = volume['name']
lun = self._get_lun_from_table(name)
path = lun.metadata['Path']
curr_size_bytes = str(lun.size)
new_size_bytes = str(int(new_size) * units.Gi)
        # extend_volume is also reused by the clone scenarios, hence
        # compare against the size stored in the LUN table first.
if curr_size_bytes != new_size_bytes:
lun_geometry = self._get_lun_geometry(path)
if (lun_geometry and lun_geometry.get("max_resize")
and int(lun_geometry.get("max_resize")) >=
int(new_size_bytes)):
self._do_direct_resize(path, new_size_bytes)
else:
self._do_sub_clone_resize(path, new_size_bytes)
self.lun_table[name].size = new_size_bytes
else:
LOG.info(_("No need to extend volume %s"
" as it is already the requested new size."), name)
def _do_direct_resize(self, path, new_size_bytes, force=True):
"""Uses the resize api to resize the lun."""
seg = path.split("/")
LOG.info(_("Resizing lun %s directly to new size."), seg[-1])
lun_resize = NaElement("lun-resize")
lun_resize.add_new_child('path', path)
lun_resize.add_new_child('size', new_size_bytes)
if force:
lun_resize.add_new_child('force', 'true')
self.client.invoke_successfully(lun_resize, True)
def _get_lun_geometry(self, path):
"""Gets the lun geometry."""
geometry = {}
lun_geo = NaElement("lun-get-geometry")
lun_geo.add_new_child('path', path)
try:
result = self.client.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content("size")
geometry['bytes_per_sector'] =\
result.get_child_content("bytes-per-sector")
geometry['sectors_per_track'] =\
result.get_child_content("sectors-per-track")
geometry['tracks_per_cylinder'] =\
result.get_child_content("tracks-per-cylinder")
geometry['cylinders'] =\
result.get_child_content("cylinders")
geometry['max_resize'] =\
result.get_child_content("max-resize-size")
except Exception as e:
LOG.error(_("Lun %(path)s geometry failed. Message - %(msg)s")
% {'path': path, 'msg': e.message})
return geometry
def _get_volume_options(self, volume_name):
"""Get the value for the volume option."""
opts = []
vol_option_list = NaElement("volume-options-list-info")
vol_option_list.add_new_child('volume', volume_name)
result = self.client.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name("options")
if options:
opts = options.get_children()
return opts
def _get_vol_option(self, volume_name, option_name):
"""Get the value for the volume option."""
value = None
options = self._get_volume_options(volume_name)
for opt in options:
if opt.get_child_content('name') == option_name:
value = opt.get_child_content('value')
break
return value
def _move_lun(self, path, new_path):
"""Moves the lun at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug("Moving lun %(name)s to %(new_name)s."
% {'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
self.client.invoke_successfully(lun_move, True)
def _do_sub_clone_resize(self, path, new_size_bytes):
"""Does sub lun clone after verification.
Clones the block ranges and swaps
the luns also deletes older lun
after a successful clone.
"""
seg = path.split("/")
LOG.info(_("Resizing lun %s using sub clone to new size."), seg[-1])
name = seg[-1]
vol_name = seg[2]
lun = self._get_lun_from_table(name)
metadata = lun.metadata
compression = self._get_vol_option(vol_name, 'compression')
if compression == "on":
msg = _('%s cannot be sub clone resized'
' as it is hosted on compressed volume')
raise exception.VolumeBackendAPIException(data=msg % name)
else:
block_count = self._get_lun_block_count(path)
if block_count == 0:
msg = _('%s cannot be sub clone resized'
' as it contains no blocks.')
raise exception.VolumeBackendAPIException(data=msg % name)
new_lun = 'new-%s' % (name)
self.create_lun(vol_name, new_lun, new_size_bytes, metadata)
try:
self._clone_lun(name, new_lun, block_count=block_count)
self._post_sub_clone_resize(path)
except Exception:
with excutils.save_and_reraise_exception():
new_path = '/vol/%s/%s' % (vol_name, new_lun)
self._destroy_lun(new_path)
def _post_sub_clone_resize(self, path):
"""Try post sub clone resize in a transactional manner."""
        # _move_lun() and _destroy_lun() return nothing, so track progress
        # with explicit flags instead of their (always None) return values.
        st_tm_mv, st_nw_mv, st_del_old = False, False, False
        seg = path.split("/")
        LOG.info(_("Post clone resize lun %s"), seg[-1])
        new_lun = 'new-%s' % (seg[-1])
        tmp_lun = 'tmp-%s' % (seg[-1])
        tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
        new_path = "/vol/%s/%s" % (seg[2], new_lun)
        try:
            self._move_lun(path, tmp_path)
            st_tm_mv = True
            self._move_lun(new_path, path)
            st_nw_mv = True
            self._destroy_lun(tmp_path)
            st_del_old = True
        except Exception as e:
            if not st_tm_mv:
                msg = _("Failure staging lun %s to tmp.")
                raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
            else:
                if not st_nw_mv:
                    self._move_lun(tmp_path, path)
                    msg = _("Failure moving new cloned lun to %s.")
                    raise exception.VolumeBackendAPIException(
                        data=msg % (seg[-1]))
                elif not st_del_old:
                    LOG.error(_("Failure deleting staged tmp lun %s."),
                              tmp_lun)
                else:
                    LOG.error(_("Unknown exception in"
                                " post clone resize lun %s."), seg[-1])
                LOG.error(_("Exception details: %s") % (e.__str__()))
def _get_lun_block_count(self, path):
"""Gets block counts for the lun."""
LOG.debug("Getting lun block count.")
block_count = 0
lun_infos = self._get_lun_by_args(path=path)
if not lun_infos:
seg = path.split('/')
msg = _('Failure getting lun info for %s.')
raise exception.VolumeBackendAPIException(data=msg % seg[-1])
lun_info = lun_infos[-1]
bs = int(lun_info.get_child_content('block-size'))
ls = int(lun_info.get_child_content('size'))
block_count = ls / bs
return block_count
class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp C-mode iSCSI volume driver."""
DEFAULT_VS = 'openstack'
def __init__(self, *args, **kwargs):
super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_cluster_opts)
def _do_custom_setup(self):
"""Does custom setup for ontap cluster."""
self.vserver = self.configuration.netapp_vserver
self.vserver = self.vserver if self.vserver else self.DEFAULT_VS
        # We set the vserver on the client permanently.
        # To use tunneling instead, enable tunneling while invoking the API.
        self.client.set_vserver(self.vserver)
        # Default values used to run the first API call.
self.client.set_api_version(1, 15)
(major, minor) = self._get_ontapi_version()
self.client.set_api_version(major, minor)
self.ssc_vols = None
self.stale_vols = set()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
ssc_utils.check_ssc_api_permissions(self.client)
super(NetAppDirectCmodeISCSIDriver, self).check_for_setup_error()
def create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group=None):
"""Creates a LUN, handling ONTAP differences as needed."""
super(NetAppDirectCmodeISCSIDriver, self).create_lun(
volume_name, lun_name, size, metadata, qos_policy_group)
self._update_stale_vols(
volume=ssc_utils.NetAppVolume(volume_name, self.vserver))
def _get_target_details(self):
"""Gets the target portal details."""
iscsi_if_iter = NaElement('iscsi-interface-get-iter')
result = self.client.invoke_successfully(iscsi_if_iter, True)
tgt_list = []
if result.get_child_content('num-records')\
and int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
iscsi_if_list = attr_list.get_children()
for iscsi_if in iscsi_if_list:
d = dict()
d['address'] = iscsi_if.get_child_content('ip-address')
d['port'] = iscsi_if.get_child_content('ip-port')
d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
d['interface-enabled'] = iscsi_if.get_child_content(
'is-interface-enabled')
tgt_list.append(d)
return tgt_list
def _get_iscsi_service_details(self):
"""Returns iscsi iqn."""
iscsi_service_iter = NaElement('iscsi-service-get-iter')
result = self.client.invoke_successfully(iscsi_service_iter, True)
if result.get_child_content('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
return iscsi_service.get_child_content('node-name')
LOG.debug('No iscsi service found for vserver %s' % (self.vserver))
return None
def _create_lun_handle(self, metadata):
"""Returns lun handle based on filer type."""
return '%s:%s' % (self.vserver, metadata['Path'])
def _get_lun_list(self):
"""Gets the list of luns on filer.
Gets the luns from cluster with vserver.
"""
tag = None
while True:
api = NaElement('lun-get-iter')
api.add_new_child('max-records', '100')
if tag:
api.add_new_child('tag', tag, True)
lun_info = NaElement('lun-info')
lun_info.add_new_child('vserver', self.vserver)
query = NaElement('query')
query.add_child_elem(lun_info)
api.add_child_elem(query)
result = self.client.invoke_successfully(api)
if result.get_child_by_name('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
self._extract_and_populate_luns(attr_list.get_children())
tag = result.get_child_content('next-tag')
if tag is None:
break
def _find_mapped_lun_igroup(self, path, initiator, os=None):
"""Find the igroup for mapped lun with initiator."""
initiator_igroups = self._get_igroup_by_initiator(initiator=initiator)
lun_maps = self._get_lun_map(path)
if initiator_igroups and lun_maps:
for igroup in initiator_igroups:
igroup_name = igroup['initiator-group-name']
if igroup_name.startswith(self.IGROUP_PREFIX):
for lun_map in lun_maps:
if lun_map['initiator-group'] == igroup_name:
return (igroup_name, lun_map['lun-id'])
return (None, None)
def _get_lun_map(self, path):
"""Gets the lun map by lun path."""
tag = None
map_list = []
while True:
lun_map_iter = NaElement('lun-map-get-iter')
lun_map_iter.add_new_child('max-records', '100')
if tag:
lun_map_iter.add_new_child('tag', tag, True)
query = NaElement('query')
lun_map_iter.add_child_elem(query)
query.add_node_with_children('lun-map-info', **{'path': path})
result = self.client.invoke_successfully(lun_map_iter, True)
tag = result.get_child_content('next-tag')
if result.get_child_content('num-records') and \
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
lun_maps = attr_list.get_children()
for lun_map in lun_maps:
lun_m = dict()
lun_m['initiator-group'] = lun_map.get_child_content(
'initiator-group')
lun_m['lun-id'] = lun_map.get_child_content('lun-id')
lun_m['vserver'] = lun_map.get_child_content('vserver')
map_list.append(lun_m)
if tag is None:
break
return map_list
def _get_igroup_by_initiator(self, initiator):
"""Get igroups by initiator."""
tag = None
igroup_list = []
while True:
igroup_iter = NaElement('igroup-get-iter')
igroup_iter.add_new_child('max-records', '100')
if tag:
igroup_iter.add_new_child('tag', tag, True)
query = NaElement('query')
igroup_iter.add_child_elem(query)
igroup_info = NaElement('initiator-group-info')
query.add_child_elem(igroup_info)
igroup_info.add_new_child('vserver', self.vserver)
initiators = NaElement('initiators')
igroup_info.add_child_elem(initiators)
initiators.add_node_with_children('initiator-info',
**{'initiator-name': initiator})
des_attrs = NaElement('desired-attributes')
des_ig_info = NaElement('initiator-group-info')
des_attrs.add_child_elem(des_ig_info)
des_ig_info.add_node_with_children('initiators',
**{'initiator-info': None})
des_ig_info.add_new_child('vserver', None)
des_ig_info.add_new_child('initiator-group-name', None)
des_ig_info.add_new_child('initiator-group-type', None)
des_ig_info.add_new_child('initiator-group-os-type', None)
igroup_iter.add_child_elem(des_attrs)
result = self.client.invoke_successfully(igroup_iter, False)
tag = result.get_child_content('next-tag')
if result.get_child_content('num-records') and\
int(result.get_child_content('num-records')) > 0:
attr_list = result.get_child_by_name('attributes-list')
igroups = attr_list.get_children()
for igroup in igroups:
ig = dict()
ig['initiator-group-os-type'] = igroup.get_child_content(
'initiator-group-os-type')
ig['initiator-group-type'] = igroup.get_child_content(
'initiator-group-type')
ig['initiator-group-name'] = igroup.get_child_content(
'initiator-group-name')
igroup_list.append(ig)
if tag is None:
break
return igroup_list
def _clone_lun(self, name, new_name, space_reserved='true',
src_block=0, dest_block=0, block_count=0):
"""Clone LUN with the given handle to the new name."""
metadata = self._get_lun_attr(name, 'metadata')
volume = metadata['Volume']
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
br_limit = 32
z_limit = br_limit * bc_limit # 256 GB
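        # With the usual 512-byte LUN block size this works out to
        # 2^24 * 512 B = 8 GiB per range and 32 * 8 GiB = 256 GiB per call
        # (the block size itself is an assumption; it is not stated here).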
z_calls = int(math.ceil(block_count / float(z_limit)))
zbc = block_count
if z_calls == 0:
z_calls = 1
for call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
else:
block_count = zbc
clone_create = NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': name,
'destination-path': new_name,
'space-reserve': space_reserved})
if block_count > 0:
block_ranges = NaElement("block-ranges")
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
for segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
else:
block_count = bc
block_range = NaElement.create_node_with_children(
'block-range',
**{'source-block-number': str(src_block),
'destination-block-number': str(dest_block),
'block-count': str(block_count)})
block_ranges.add_child_elem(block_range)
src_block += int(block_count)
dest_block += int(block_count)
clone_create.add_child_elem(block_ranges)
self.client.invoke_successfully(clone_create, True)
LOG.debug("Cloned LUN with new name %s" % new_name)
lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
% (volume, new_name))
if len(lun) == 0:
msg = _("No cloned lun named %s found on the filer")
raise exception.VolumeBackendAPIException(data=msg % (new_name))
clone_meta = self._create_lun_meta(lun[0])
self._add_lun_to_table(NetAppLun('%s:%s' % (clone_meta['Vserver'],
clone_meta['Path']),
new_name,
lun[0].get_child_content('size'),
clone_meta))
self._update_stale_vols(
volume=ssc_utils.NetAppVolume(volume, self.vserver))
def _get_lun_by_args(self, **args):
"""Retrieves lun with specified args."""
lun_iter = NaElement('lun-get-iter')
lun_iter.add_new_child('max-records', '100')
query = NaElement('query')
lun_iter.add_child_elem(query)
query.add_node_with_children('lun-info', **args)
luns = self.client.invoke_successfully(lun_iter)
attr_list = luns.get_child_by_name('attributes-list')
return attr_list.get_children()
def _create_lun_meta(self, lun):
"""Creates lun metadata dictionary."""
self._is_naelement(lun)
meta_dict = {}
meta_dict['Vserver'] = lun.get_child_content('vserver')
meta_dict['Volume'] = lun.get_child_content('volume')
meta_dict['Qtree'] = lun.get_child_content('qtree')
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = \
lun.get_child_content('is-space-reservation-enabled')
return meta_dict
def _configure_tunneling(self, do_tunneling=False):
"""Configures tunneling for ontap cluster."""
if do_tunneling:
self.client.set_vserver(self.vserver)
else:
self.client.set_vserver(None)
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
        sync = self.ssc_vols is None
ssc_utils.refresh_cluster_ssc(self, self.client,
self.vserver, synchronous=sync)
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_iSCSI_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['pools'] = self._get_pool_stats()
na_utils.provide_ems(self, self.client, data, netapp_backend)
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. ONTAP volume) stats info from SSC volumes."""
pools = []
if not self.ssc_vols:
return pools
for vol in self.ssc_vols['all']:
pool = dict()
pool['pool_name'] = vol.id['name']
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
# convert sizes to GB and de-rate by NetApp multiplier
total = float(vol.space['size_total_bytes'])
total /= self.configuration.netapp_size_multiplier
total /= units.Gi
pool['total_capacity_gb'] = round_down(total, '0.01')
free = float(vol.space['size_avl_bytes'])
free /= self.configuration.netapp_size_multiplier
free /= units.Gi
pool['free_capacity_gb'] = round_down(free, '0.01')
pool['netapp_raid_type'] = vol.aggr['raid_type']
pool['netapp_disk_type'] = vol.aggr['disk_type']
mirrored = vol in self.ssc_vols['mirrored']
pool['netapp_mirrored'] = six.text_type(mirrored).lower()
pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
dedup = vol in self.ssc_vols['dedup']
pool['netapp_dedup'] = six.text_type(dedup).lower()
pool['netapp_nodedup'] = six.text_type(not dedup).lower()
compression = vol in self.ssc_vols['compression']
pool['netapp_compression'] = six.text_type(compression).lower()
pool['netapp_nocompression'] = six.text_type(
not compression).lower()
thin = vol in self.ssc_vols['thin']
pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
pool['netapp_thick_provisioned'] = six.text_type(not thin).lower()
pools.append(pool)
return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy if reset."""
if volume:
self.stale_vols.add(volume)
if reset:
set_copy = copy.deepcopy(self.stale_vols)
self.stale_vols.clear()
return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
self.ssc_vols = vols
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
try:
lun = self._get_lun_from_table(volume['name'])
except exception.VolumeNotFound:
lun = None
netapp_vol = None
if lun:
netapp_vol = lun.get_metadata_property('Volume')
super(NetAppDirectCmodeISCSIDriver, self).delete_volume(volume)
if netapp_vol:
self._update_stale_vols(
volume=ssc_utils.NetAppVolume(netapp_vol, self.vserver))
class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp 7-mode iSCSI volume driver."""
def __init__(self, *args, **kwargs):
super(NetAppDirect7modeISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_7mode_opts)
def _do_custom_setup(self):
"""Does custom setup depending on the type of filer."""
self.vfiler = self.configuration.netapp_vfiler
self.volume_list = self.configuration.netapp_volume_list
if self.volume_list:
self.volume_list = self.volume_list.split(',')
self.volume_list = [el.strip() for el in self.volume_list]
(major, minor) = self._get_ontapi_version()
self.client.set_api_version(major, minor)
if self.vfiler:
self.client.set_vfiler(self.vfiler)
self.vol_refresh_time = None
self.vol_refresh_interval = 1800
self.vol_refresh_running = False
self.vol_refresh_voluntary = False
self.root_volume_name = self._get_root_volume_name()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
api_version = self.client.get_api_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported ONTAP version."
" ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Api version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
super(NetAppDirect7modeISCSIDriver, self).check_for_setup_error()
def create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group=None):
"""Creates a LUN, handling ONTAP differences as needed."""
super(NetAppDirect7modeISCSIDriver, self).create_lun(
volume_name, lun_name, size, metadata, qos_policy_group)
self.vol_refresh_voluntary = True
def _get_filer_volumes(self, volume=None):
"""Returns list of filer volumes in api format."""
vol_request = NaElement('volume-list-info')
if volume:
vol_request.add_new_child('volume', volume)
res = self.client.invoke_successfully(vol_request, True)
volumes = res.get_child_by_name('volumes')
if volumes:
return volumes.get_children()
return []
def _get_root_volume_name(self):
# switch to volume-get-root-name API when possible
vols = self._get_filer_volumes()
for vol in vols:
volume_name = vol.get_child_content('name')
if self._get_vol_option(volume_name, 'root') == 'true':
return volume_name
LOG.warn(_('Could not determine root volume name '
'on %s.') % self._get_owner())
return None
def _get_igroup_by_initiator(self, initiator):
"""Get igroups by initiator."""
igroup_list = NaElement('igroup-list-info')
result = self.client.invoke_successfully(igroup_list, True)
igroups = []
igs = result.get_child_by_name('initiator-groups')
if igs:
ig_infos = igs.get_children()
if ig_infos:
for info in ig_infos:
initiators = info.get_child_by_name('initiators')
init_infos = initiators.get_children()
if init_infos:
for init in init_infos:
if init.get_child_content('initiator-name')\
== initiator:
d = dict()
d['initiator-group-os-type'] = \
info.get_child_content(
'initiator-group-os-type')
d['initiator-group-type'] = \
info.get_child_content(
'initiator-group-type')
d['initiator-group-name'] = \
info.get_child_content(
'initiator-group-name')
igroups.append(d)
return igroups
def _get_target_details(self):
"""Gets the target portal details."""
iscsi_if_iter = NaElement('iscsi-portal-list-info')
result = self.client.invoke_successfully(iscsi_if_iter, True)
tgt_list = []
portal_list_entries = result.get_child_by_name(
'iscsi-portal-list-entries')
if portal_list_entries:
portal_list = portal_list_entries.get_children()
for iscsi_if in portal_list:
d = dict()
d['address'] = iscsi_if.get_child_content('ip-address')
d['port'] = iscsi_if.get_child_content('ip-port')
d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
tgt_list.append(d)
return tgt_list
def _get_iscsi_service_details(self):
"""Returns iscsi iqn."""
iscsi_service_iter = NaElement('iscsi-node-get-name')
result = self.client.invoke_successfully(iscsi_service_iter, True)
return result.get_child_content('node-name')
def _get_owner(self):
if self.vfiler:
owner = '%s:%s' % (self.configuration.netapp_server_hostname,
self.vfiler)
else:
owner = self.configuration.netapp_server_hostname
return owner
def _create_lun_handle(self, metadata):
"""Returns lun handle based on filer type."""
owner = self._get_owner()
return '%s:%s' % (owner, metadata['Path'])
def _get_lun_list(self):
"""Gets the list of luns on filer."""
lun_list = []
if self.volume_list:
for vol in self.volume_list:
try:
luns = self._get_vol_luns(vol)
if luns:
lun_list.extend(luns)
except NaApiError:
LOG.warn(_("Error finding luns for volume %s."
" Verify volume exists.") % (vol))
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
self._extract_and_populate_luns(lun_list)
def _get_vol_luns(self, vol_name):
"""Gets the luns for a volume."""
api = NaElement('lun-list-info')
if vol_name:
api.add_new_child('volume-name', vol_name)
result = self.client.invoke_successfully(api, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def _find_mapped_lun_igroup(self, path, initiator, os=None):
"""Find the igroup for mapped lun with initiator."""
lun_map_list = NaElement.create_node_with_children(
'lun-map-list-info',
**{'path': path})
result = self.client.invoke_successfully(lun_map_list, True)
igroups = result.get_child_by_name('initiator-groups')
if igroups:
igroup = None
lun_id = None
found = False
igroup_infs = igroups.get_children()
for ig in igroup_infs:
initiators = ig.get_child_by_name('initiators')
init_infs = initiators.get_children()
for info in init_infs:
if info.get_child_content('initiator-name') == initiator:
found = True
igroup = ig.get_child_content('initiator-group-name')
lun_id = ig.get_child_content('lun-id')
break
if found:
break
return (igroup, lun_id)
def _clone_lun(self, name, new_name, space_reserved='true',
src_block=0, dest_block=0, block_count=0):
"""Clone LUN with the given handle to the new name."""
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
(parent, splitter, name) = path.rpartition('/')
clone_path = '%s/%s' % (parent, new_name)
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
br_limit = 32
z_limit = br_limit * bc_limit # 256 GB
z_calls = int(math.ceil(block_count / float(z_limit)))
zbc = block_count
if z_calls == 0:
z_calls = 1
for call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
else:
block_count = zbc
clone_start = NaElement.create_node_with_children(
'clone-start', **{'source-path': path,
'destination-path': clone_path,
'no-snap': 'true'})
if block_count > 0:
block_ranges = NaElement("block-ranges")
                # zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
for segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
else:
block_count = bc
block_range = NaElement.create_node_with_children(
'block-range',
**{'source-block-number': str(src_block),
'destination-block-number': str(dest_block),
'block-count': str(block_count)})
block_ranges.add_child_elem(block_range)
src_block += int(block_count)
dest_block += int(block_count)
clone_start.add_child_elem(block_ranges)
result = self.client.invoke_successfully(clone_start, True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
vol_uuid = cl_id_info.get_child_content('volume-uuid')
clone_id = cl_id_info.get_child_content('clone-op-id')
if vol_uuid:
self._check_clone_status(clone_id, vol_uuid, name, new_name)
self.vol_refresh_voluntary = True
luns = self._get_lun_by_args(path=clone_path)
if luns:
cloned_lun = luns[0]
self._set_space_reserve(clone_path, space_reserved)
clone_meta = self._create_lun_meta(cloned_lun)
handle = self._create_lun_handle(clone_meta)
self._add_lun_to_table(
NetAppLun(handle, new_name,
cloned_lun.get_child_content('size'),
clone_meta))
else:
raise NaApiError('ENOLUNENTRY', 'No Lun entry found on the filer')
def _set_space_reserve(self, path, enable):
"""Sets the space reserve info."""
space_res = NaElement.create_node_with_children(
'lun-set-space-reservation-info',
**{'path': path, 'enable': enable})
self.client.invoke_successfully(space_res, True)
def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
"""Checks for the job till completed."""
clone_status = NaElement('clone-list-status')
cl_id = NaElement('clone-id')
clone_status.add_child_elem(cl_id)
cl_id.add_node_with_children(
'clone-id-info',
**{'clone-op-id': clone_id, 'volume-uuid': vol_uuid})
running = True
clone_ops_info = None
while running:
result = self.client.invoke_successfully(clone_status, True)
status = result.get_child_by_name('status')
ops_info = status.get_children()
if ops_info:
for info in ops_info:
if info.get_child_content('clone-state') == 'running':
time.sleep(1)
break
else:
running = False
clone_ops_info = info
break
else:
if clone_ops_info:
fmt = {'name': name, 'new_name': new_name}
if clone_ops_info.get_child_content('clone-state')\
== 'completed':
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s completed" % fmt)
else:
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s failed" % fmt)
raise NaApiError(
clone_ops_info.get_child_content('error'),
clone_ops_info.get_child_content('reason'))
def _get_lun_by_args(self, **args):
"""Retrieves luns with specified args."""
lun_info = NaElement.create_node_with_children('lun-list-info', **args)
result = self.client.invoke_successfully(lun_info, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def _create_lun_meta(self, lun):
"""Creates lun metadata dictionary."""
self._is_naelement(lun)
meta_dict = {}
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['Volume'] = lun.get_child_content('path').split('/')[2]
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = lun.get_child_content(
'is-space-reservation-enabled')
return meta_dict
def _update_volume_stats(self):
"""Retrieve stats info from filer."""
# ensure we get current data
self.vol_refresh_voluntary = True
self._refresh_volume_info()
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_iSCSI_7mode_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['pools'] = self._get_pool_stats()
na_utils.provide_ems(self, self.client, data, netapp_backend,
server_type='7mode')
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. ONTAP volume) stats info from volumes."""
pools = []
if not self.vols:
return pools
for vol in self.vols:
# omit volumes not specified in the config
volume_name = vol.get_child_content('name')
if self.volume_list and volume_name not in self.volume_list:
continue
# omit root volume
if volume_name == self.root_volume_name:
continue
# ensure good volume state
state = vol.get_child_content('state')
inconsistent = vol.get_child_content('is-inconsistent')
invalid = vol.get_child_content('is-invalid')
if (state != 'online' or
inconsistent != 'false' or
invalid != 'false'):
continue
pool = dict()
pool['pool_name'] = volume_name
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
# convert sizes to GB and de-rate by NetApp multiplier
total = float(vol.get_child_content('size-total') or 0)
total /= self.configuration.netapp_size_multiplier
total /= units.Gi
pool['total_capacity_gb'] = round_down(total, '0.01')
free = float(vol.get_child_content('size-available') or 0)
free /= self.configuration.netapp_size_multiplier
free /= units.Gi
pool['free_capacity_gb'] = round_down(free, '0.01')
pools.append(pool)
return pools
def _get_lun_block_count(self, path):
"""Gets block counts for the lun."""
bs = super(
NetAppDirect7modeISCSIDriver, self)._get_lun_block_count(path)
api_version = self.client.get_api_version()
if api_version:
major = api_version[0]
minor = api_version[1]
if major == 1 and minor < 15:
bs = bs - 1
return bs
def _refresh_volume_info(self):
"""Saves the volume information for the filer."""
if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
timeutils.is_newer_than(self.vol_refresh_time,
self.vol_refresh_interval)):
try:
job_set = set_safe_attr(self, 'vol_refresh_running', True)
if not job_set:
LOG.warn(
_("Volume refresh job already running. Returning..."))
return
self.vol_refresh_voluntary = False
self.vols = self._get_filer_volumes()
self.vol_refresh_time = timeutils.utcnow()
except Exception as e:
LOG.warn(_("Error refreshing volume info. Message: %s"),
six.text_type(e))
finally:
set_safe_attr(self, 'vol_refresh_running', False)
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
super(NetAppDirect7modeISCSIDriver, self).delete_volume(volume)
self.vol_refresh_voluntary = True
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests.compat import MagicMock
from datetime import datetime
import six
from airflow import models
from airflow.contrib.operators.bigquery_get_data import BigQueryGetDataOperator
from airflow.contrib.operators.bigquery_operator import \
BigQueryCreateExternalTableOperator, BigQueryCreateEmptyTableOperator, \
BigQueryDeleteDatasetOperator, BigQueryCreateEmptyDatasetOperator, \
BigQueryOperator, BigQueryConsoleLink, BigQueryGetDatasetOperator, \
BigQueryPatchDatasetOperator, BigQueryUpdateDatasetOperator
from airflow.contrib.operators.bigquery_table_delete_operator import \
BigQueryTableDeleteOperator
from airflow.contrib.operators.bigquery_to_bigquery import \
BigQueryToBigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.contrib.operators.bigquery_to_mysql_operator import BigQueryToMySqlOperator
from airflow.exceptions import AirflowException
from airflow.models import DAG, TaskFail, TaskInstance, XCom
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.settings import Session
from airflow.utils.db import provide_session
from tests.compat import mock
if six.PY2:
# Need `assertWarns` back-ported from unittest2
import unittest2 as unittest
else:
import unittest
TASK_ID = 'test-bq-create-table-operator'
TEST_DATASET = 'test-dataset'
TEST_GCP_PROJECT_ID = 'test-project'
TEST_DELETE_CONTENTS = True
TEST_TABLE_ID = 'test-table-id'
TEST_GCS_BUCKET = 'test-bucket'
TEST_GCS_DATA = ['dir1/*.csv']
TEST_SOURCE_FORMAT = 'CSV'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'test-bigquery-operators'
class BigQueryCreateEmptyTableOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryCreateEmptyTableOperator(task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID)
operator.execute(None)
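        # The operator is expected to delegate to
        # BigQueryHook().get_conn().cursor().create_empty_table() exactly once.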
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.create_empty_table \
.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
schema_fields=None,
time_partitioning={},
labels=None,
encryption_configuration=None
)
class BigQueryCreateExternalTableOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryCreateExternalTableOperator(
task_id=TASK_ID,
destination_project_dataset_table='{}.{}'.format(
TEST_DATASET, TEST_TABLE_ID
),
schema_fields=[],
bucket=TEST_GCS_BUCKET,
source_objects=TEST_GCS_DATA,
source_format=TEST_SOURCE_FORMAT
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.create_external_table \
.assert_called_once_with(
external_project_dataset_table='{}.{}'.format(
TEST_DATASET, TEST_TABLE_ID
),
schema_fields=[],
source_uris=['gs://{}/{}'.format(TEST_GCS_BUCKET, source_object)
for source_object in TEST_GCS_DATA],
source_format=TEST_SOURCE_FORMAT,
compression='NONE',
skip_leading_rows=0,
field_delimiter=',',
max_bad_records=0,
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs={},
labels=None,
encryption_configuration=None
)
class BigQueryDeleteDatasetOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryDeleteDatasetOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
delete_contents=TEST_DELETE_CONTENTS
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.delete_dataset \
.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
delete_contents=TEST_DELETE_CONTENTS
)
class BigQueryCreateEmptyDatasetOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryCreateEmptyDatasetOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.create_empty_dataset \
.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
dataset_reference={}
)
class BigQueryGetDatasetOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryGetDatasetOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.get_dataset \
.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID
)
class BigQueryPatchDatasetOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
dataset_resource = {"friendlyName": 'Test DS'}
operator = BigQueryPatchDatasetOperator(
dataset_resource=dataset_resource,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.patch_dataset \
.assert_called_once_with(
dataset_resource=dataset_resource,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID
)
class BigQueryUpdateDatasetOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
dataset_resource = {"friendlyName": 'Test DS'}
operator = BigQueryUpdateDatasetOperator(
dataset_resource=dataset_resource,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.update_dataset \
.assert_called_once_with(
dataset_resource=dataset_resource,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID
)
class BigQueryOperatorTest(unittest.TestCase):
def test_bql_deprecation_warning(self):
with self.assertWarns(DeprecationWarning) as cm:
task = BigQueryOperator(
task_id='test_deprecation_warning_for_bql',
bql='select * from test_table'
)
warning = cm.warning
assert task, "Task should be created"
assert 'Deprecated parameter `bql` used in Task id: test_deprecation_warning_for_bql. ' \
'Use `sql` parameter instead to pass the sql to be executed.' \
' `bql` parameter is deprecated and will be removed' \
' in a future version of Airflow.' == warning.args[0]
def setUp(self):
self.dagbag = models.DagBag(
dag_folder='/dev/null', include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
def tearDown(self):
session = Session()
session.query(models.TaskInstance).filter_by(
dag_id=TEST_DAG_ID).delete()
session.query(TaskFail).filter_by(
dag_id=TEST_DAG_ID).delete()
session.commit()
session.close()
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute(self, mock_hook):
encryption_configuration = {'key': 'kk'}
operator = BigQueryOperator(
task_id=TASK_ID,
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
bigquery_conn_id='bigquery_default',
udf_config=None,
use_legacy_sql=True,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=encryption_configuration
)
operator.execute(MagicMock())
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_query \
.assert_called_once_with(
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=encryption_configuration
)
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute_list(self, mock_hook):
operator = BigQueryOperator(
task_id=TASK_ID,
sql=[
'Select * from test_table',
'Select * from other_test_table',
],
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
bigquery_conn_id='google_cloud_default',
udf_config=None,
use_legacy_sql=True,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None,
)
operator.execute(MagicMock())
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_query \
.assert_has_calls([
mock.call(
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None,
),
mock.call(
sql='Select * from other_test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None,
),
])
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_execute_bad_type(self, mock_hook):
operator = BigQueryOperator(
task_id=TASK_ID,
sql=1,
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
bigquery_conn_id='google_cloud_default',
udf_config=None,
use_legacy_sql=True,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
)
with self.assertRaises(AirflowException):
operator.execute(MagicMock())
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_bigquery_operator_defaults(self, mock_hook):
operator = BigQueryOperator(
task_id=TASK_ID,
sql='Select * from test_table',
dag=self.dag,
default_args=self.args,
schema_update_options=None
)
operator.execute(MagicMock())
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_query \
.assert_called_once_with(
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=None,
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None
)
        self.assertIsInstance(operator.sql, six.string_types)
ti = TaskInstance(task=operator, execution_date=DEFAULT_DATE)
ti.render_templates()
        self.assertIsInstance(ti.task.sql, six.string_types)
@provide_session
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_bigquery_operator_extra_link_when_missing_job_id(self, mock_hook, session):
bigquery_task = BigQueryOperator(
task_id=TASK_ID,
sql='SELECT * FROM test_table',
dag=self.dag,
)
self.dag.clear()
session.query(XCom).delete()
self.assertEqual(
'',
bigquery_task.get_extra_links(DEFAULT_DATE, BigQueryConsoleLink.name),
)
@provide_session
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_bigquery_operator_extra_link_when_single_query(self, mock_hook, session):
bigquery_task = BigQueryOperator(
task_id=TASK_ID,
sql='SELECT * FROM test_table',
dag=self.dag,
)
self.dag.clear()
session.query(XCom).delete()
ti = TaskInstance(
task=bigquery_task,
execution_date=DEFAULT_DATE,
)
job_id = '12345'
ti.xcom_push(key='job_id', value=job_id)
        self.assertEqual(
'https://console.cloud.google.com/bigquery?j={job_id}'.format(job_id=job_id),
bigquery_task.get_extra_links(DEFAULT_DATE, BigQueryConsoleLink.name),
)
        self.assertEqual(
'',
bigquery_task.get_extra_links(datetime(2019, 1, 1), BigQueryConsoleLink.name),
)
@provide_session
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
def test_bigquery_operator_extra_link_when_multiple_query(self, mock_hook, session):
bigquery_task = BigQueryOperator(
task_id=TASK_ID,
sql=['SELECT * FROM test_table', 'SELECT * FROM test_table2'],
dag=self.dag,
)
self.dag.clear()
session.query(XCom).delete()
ti = TaskInstance(
task=bigquery_task,
execution_date=DEFAULT_DATE,
)
job_id = ['123', '45']
ti.xcom_push(key='job_id', value=job_id)
six.assertCountEqual(
self, {'BigQuery Console #1', 'BigQuery Console #2'},
bigquery_task.operator_extra_link_dict.keys()
)
self.assertEqual(
'https://console.cloud.google.com/bigquery?j=123',
bigquery_task.get_extra_links(DEFAULT_DATE, 'BigQuery Console #1'),
)
self.assertEqual(
'https://console.cloud.google.com/bigquery?j=45',
bigquery_task.get_extra_links(DEFAULT_DATE, 'BigQuery Console #2'),
)
def test_bigquery_operator_extra_serialized_field_when_single_query(self):
with self.dag:
BigQueryOperator(
task_id=TASK_ID,
sql='SELECT * FROM test_table',
)
serialized_dag = SerializedDAG.to_dict(self.dag)
self.assertIn("sql", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict[TASK_ID]
self.assertEqual(getattr(simple_task, "sql"), 'SELECT * FROM test_table')
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'airflow.contrib.operators.bigquery_operator.BigQueryConsoleLink': {}}]
)
# Check DeSerialized version of operator link
self.assertIsInstance(list(simple_task.operator_extra_links)[0], BigQueryConsoleLink)
ti = TaskInstance(task=simple_task, execution_date=DEFAULT_DATE)
ti.xcom_push('job_id', 12345)
# check for positive case
url = simple_task.get_extra_links(DEFAULT_DATE, BigQueryConsoleLink.name)
self.assertEqual(url, 'https://console.cloud.google.com/bigquery?j=12345')
# check for negative case
url2 = simple_task.get_extra_links(datetime(2017, 1, 2), BigQueryConsoleLink.name)
self.assertEqual(url2, '')
class BigQueryGetDataOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_get_data.BigQueryHook')
def test_execute(self, mock_hook):
max_results = '100'
selected_fields = 'DATE'
operator = BigQueryGetDataOperator(task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=max_results,
selected_fields=selected_fields,
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.get_tabledata \
.assert_called_once_with(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=max_results,
selected_fields=selected_fields,
)
class BigQueryTableDeleteOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_table_delete_operator.BigQueryHook')
def test_execute(self, mock_hook):
ignore_if_missing = True
deletion_dataset_table = '{}.{}'.format(TEST_DATASET, TEST_TABLE_ID)
operator = BigQueryTableDeleteOperator(
task_id=TASK_ID,
deletion_dataset_table=deletion_dataset_table,
ignore_if_missing=ignore_if_missing
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_table_delete \
.assert_called_once_with(
deletion_dataset_table=deletion_dataset_table,
ignore_if_missing=ignore_if_missing
)
class BigQueryToBigQueryOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_to_bigquery.BigQueryHook')
def test_execute(self, mock_hook):
source_project_dataset_tables = '{}.{}'.format(
TEST_DATASET, TEST_TABLE_ID)
destination_project_dataset_table = '{}.{}'.format(
TEST_DATASET + '_new', TEST_TABLE_ID)
write_disposition = 'WRITE_EMPTY'
create_disposition = 'CREATE_IF_NEEDED'
labels = {'k1': 'v1'}
encryption_configuration = {'key': 'kk'}
operator = BigQueryToBigQueryOperator(
task_id=TASK_ID,
source_project_dataset_tables=source_project_dataset_tables,
destination_project_dataset_table=destination_project_dataset_table,
write_disposition=write_disposition,
create_disposition=create_disposition,
labels=labels,
encryption_configuration=encryption_configuration
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_copy \
.assert_called_once_with(
source_project_dataset_tables=source_project_dataset_tables,
destination_project_dataset_table=destination_project_dataset_table,
write_disposition=write_disposition,
create_disposition=create_disposition,
labels=labels,
encryption_configuration=encryption_configuration
)
class BigQueryToCloudStorageOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_to_gcs.BigQueryHook')
def test_execute(self, mock_hook):
source_project_dataset_table = '{}.{}'.format(
TEST_DATASET, TEST_TABLE_ID)
destination_cloud_storage_uris = ['gs://some-bucket/some-file.txt']
compression = 'NONE'
export_format = 'CSV'
field_delimiter = ','
print_header = True
labels = {'k1': 'v1'}
operator = BigQueryToCloudStorageOperator(
task_id=TASK_ID,
source_project_dataset_table=source_project_dataset_table,
destination_cloud_storage_uris=destination_cloud_storage_uris,
compression=compression,
export_format=export_format,
field_delimiter=field_delimiter,
print_header=print_header,
labels=labels
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_extract \
.assert_called_once_with(
source_project_dataset_table=source_project_dataset_table,
destination_cloud_storage_uris=destination_cloud_storage_uris,
compression=compression,
export_format=export_format,
field_delimiter=field_delimiter,
print_header=print_header,
labels=labels
)
class BigQueryToMySqlOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.bigquery_to_mysql_operator.BigQueryHook')
def test_execute_good_request_to_bq(self, mock_hook):
destination_table = 'table'
operator = BigQueryToMySqlOperator(
task_id=TASK_ID,
dataset_table='{}.{}'.format(TEST_DATASET, TEST_TABLE_ID),
mysql_table=destination_table,
replace=False,
)
operator.execute(None)
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.get_tabledata \
.assert_called_once_with(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=1000,
selected_fields=None,
start_index=0
)
|
|
import pygame
import random
from .components import *
from .utils import getBasesLinear
from .core import globalSystem
from .maths import Vector2D
class Bullet(object):
"""
The most basic bullet object representable in DML. Every
projectile, weapon, generator, or particle effect is a
child class of Bullet.
The Bullet class provides methods of attaching components
to an object such that the functionality of the component
can be used to describe the behaviour of the bullet.
"""
    # The number of possible auto-generated names (10 hex digits).
_RAND_NAME_SIZE = 16**10
def __init__(self, origin, name=None, **config):
super().__init__()
if name is None:
# Generate a random name if none is given.
name = "%010x" % random.randrange(Bullet._RAND_NAME_SIZE)
self.name = name
# The origin is the origin of this bullet's local coordinate space
        # relative to the world coordinate space. A bullet's position in
# its local coordinate space is its 'position' attribute, but its
# position in the world coordinate space is the sum of its origin
# and its position.
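        # For example, with origin (100, 50) and a displacement of (5, 0) the
        # bullet ends up at world coordinates (105, 50).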
self.origin = Vector2D(origin)
self.position = self.origin
# The current displacement is what motion components affect. It represents
# the next position the bullet will move to, relative to its local coordinate
# space.
self._current_displacement = Vector2D.origin
self.local_time = 0
self._dead = False
# Components are stored in a dictionary where the keys are the classes of the
# components and the values are a list of all components of that type attached
# to this bullet. This is so that we can say self.getComponent(Parent) and get
        # all components whose parent class is Parent, rather than just all
        # components of type Parent.
self._components = {}
# Auto components are components that update themselves regardless of a
# bullet's ``update`` method.
self._auto_components = []
self.initialize(**config)
globalSystem.addBullet(self)
def initialize(self, **config):
"""Extra initialization. Use this to add components to a bullet."""
pass
def addComponent(self, component):
"""Add a component to this bullet."""
component = component.withBullet(self)
for base in getBasesLinear(type(component), Component):
self._components.setdefault(base, []) \
.append(component)
if component.AUTOMATIC:
self._auto_components.append(component)
def getComponent(self, componentType):
"""
Get a single component.
If multiple of the same component exist, the one that was
first created is returned.
If the component does not exist, None will be returned.
"""
components = self._components.get(componentType)
if components is None:
return components
return components[0]
def getComponents(self, componentType):
"""Get a list of components."""
return self._components.get(componentType, [])
def forEach(self, componentType):
"""
Return a list of components for which to call a method on each.
The use case for this method is as follows:
====
self.forEach(ComponentType).methodName(*args, **kwargs)
====
The above code is functionally equivalent to the following code:
====
for component in self.getComponents(ComponentType):
component.methodName(*args, **kwargs)
====
"""
return ComponentListCaller(self._components[componentType])
def getFromEach(self, componentType):
"""
Return a list of components for which to obtain a common attribute from each.
The use case for this method is as follows:
====
        attributes = self.getFromEach(ComponentType).attributeName
====
The above code is functionally equivalent to the following code:
====
attributes = []
for component in self.getComponents(ComponentType):
attributes.append(component.attributeName)
====
"""
return ComponentListGetter(self._components[componentType])
def After(self, time):
"""
Return True if the local time is after the given time.
"""
return self.local_time > time
def Before(self, time):
"""
Return True if the local time is before the given time.
"""
return self.local_time < time
def From(self, start, end):
"""
Return True if the local time is between the given start and end times.
"""
return start <= self.local_time < end
def At(self, time):
"""
Return True once at the given time.
"""
return self.From(time, time + globalSystem._timestep)
def AtIntervals(self, interval, start=0, end=float('inf')):
"""
Return True at given intervals.
"""
return 0 <= self.local_time % interval < globalSystem._timestep \
and self.local_time >= start \
and self.local_time <= end
def isDead(self):
"""Check if this bullet is dead or not."""
return self._dead
def kill(self):
"""Set this bullet as dead."""
self._dead = True
def _update(self):
"""Internal update."""
self.update()
for component in self._auto_components:
component._auto()
self.local_time += globalSystem._timestep
def update(self):
"""External update. Describe your bullet's functionality here."""
pass
def render(self):
"""Activate all Render components."""
for component in self.getComponents(Render):
component.render()
def move(self):
"""Activate all Motion components."""
for component in self.getComponents(Motion):
component.moveBullet()
self.position = self.origin + self._current_displacement
self._current_displacement = Vector2D.origin
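class ExampleTimedBullet(Bullet):
    """A minimal usage sketch, not part of DML itself: a bullet that simply
    expires after 10 units of local time. A real bullet would normally also
    attach Motion/Render components in ``initialize``; their constructors are
    not shown here because their signatures are not defined in this module."""
    def update(self):
        # local_time is advanced by globalSystem._timestep in _update(), so
        # After(10) becomes True once 10 time units have elapsed.
        if self.After(10):
            self.kill()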
|
|
#!/usr/bin/env python
# miedema.py v1.6 12-13-2012 Jeff Doak jeff.w.doak@gmail.com
# adapted by S. Kirklin 1/7/14
import numpy as np
import sys
import yaml
import logging
import qmpy
from qmpy.utils import *
logger = logging.getLogger(__name__)
__all__ = ["Miedema"]
# data rows:
# Element_name Phi Rho Vol Z Valence TM? RtoP Htrans
params = yaml.safe_load(open(qmpy.INSTALL_PATH + "/data/miedema.yml").read())
class Miedema(object):
def __init__(self, composition):
"""
        Takes a variety of composition representations and computes the Miedema
        model formation energy, if possible. The result is stored in
        ``self.energy``; the ``Miedema.get`` helper returns it directly.
        Examples::
            >>> Miedema.get({"Fe":0.5, "Ni":0.5})
            -0.03
            >>> Miedema.get({'Fe':2, 'Ni':2})
            -0.03
            >>> Miedema.get('FeNi')
            -0.03
            >>> composition = Composition.get('FeNi')
            >>> Miedema.get(composition)
            -0.03
        Returns:
            Energy per atom (eV/atom), or None if the composition is not a
            supported binary.
"""
self.energy = None
# validate composition
if isinstance(composition, str):
composition = parse_comp(composition)
elif isinstance(composition, qmpy.Composition):
composition = dict(composition.comp)
elif isinstance(composition, dict):
pass
else:
raise TypeError("Unrecognized composition:", composition)
if len(composition) != 2:
return None
        # Bail out gracefully if either element has no Miedema parameters.
        if not all(params.get(k) for k in composition):
self.energy = None
return
composition = unit_comp(composition)
self.elt_a, self.elt_b = list(composition.keys())
self.x = composition[self.elt_b]
self.A = params[self.elt_a]
self.B = params[self.elt_b]
self.energy = self.H_form_ord()
@property
def P(self):
"""
Chooses a value of P based on the transition metal status of the elements
A and B.
There are 3 values of P for the cases where:
both A and B are TM
only one of A and B is a TM
neither are TMs.
"""
possibleP = [14.2, 12.35, 10.7]
if (self.A[5] + self.B[5]) == 2:
# Both elementA and elementB are Transition Metals.
return possibleP[0]
elif (self.A[5] + self.B[5]) == 1:
# Only one of elementA and elementB are Transition Metals.
return possibleP[1]
else:
# Neither elementA nor elementB are Transition Metals.
return possibleP[2]
@property
def RtoP(self):
"""Calculate and return the value of RtoP based on the transition metal
status of elements A and B, and the elemental values of RtoP for elements A
and B."""
# List of Transition Metals as given in Fig 2.28 of
# de Boer, et al., Cohesion in Metals (1988) (page 66).
tmrange = []
tmrange.extend(list(range(20, 30)))
tmrange.extend(list(range(38, 48)))
tmrange.extend(list(range(56, 58)))
tmrange.extend(list(range(72, 80)))
tmrange.extend([90, 92, 94])
# List of Non-Transition Metals as given in Fig 2.28 of
# de Boer, et al., Cohesion in Metals (1988) (page 66).
nontmrange = []
nontmrange.extend(list(range(3, 8)))
nontmrange.extend(list(range(11, 16)))
nontmrange.extend([19])
nontmrange.extend(list(range(30, 34)))
nontmrange.extend([37])
nontmrange.extend(list(range(48, 52)))
nontmrange.extend([55])
nontmrange.extend(list(range(80, 84)))
# If one of A,B is in tmrange and the other is in nontmrange, set RtoP
# to the product of elemental values, otherwise set RtoP to zero.
if (self.A[3] in tmrange) and (self.B[3] in nontmrange):
RtoP = self.A[6] * self.B[6]
elif (self.A[3] in nontmrange) and (self.B[3] in tmrange):
RtoP = self.A[6] * self.B[6]
else:
RtoP = 0.0
return RtoP
@property
def a_A(self):
return self.pick_a(self.elt_a)
@property
def a_B(self):
return self.pick_a(self.elt_b)
def pick_a(self, elt):
"""Choose a value of a based on the valence of element A."""
possible_a = [0.14, 0.1, 0.07, 0.04]
if elt == self.elt_a:
params = self.A
else:
params = self.B
if params[4] == 1:
return possible_a[0]
elif params[4] == 2:
return possible_a[1]
elif params[4] == 3:
return possible_a[2]
# elif elementA in ["Ag","Au","Ir","Os","Pd","Pt","Rh","Ru"]:
elif elt in ["Ag", "Au", "Cu"]:
return possible_a[2]
else:
return possible_a[3]
@property
def gamma(self):
"""Calculate and return the value of Gamma_AB (= Gamma_BA) for the solvation
of element A in element B."""
QtoP = 9.4 # Constant from Miedema's Model.
phi = [self.A[0], self.B[0]]
rho = [self.A[1], self.B[1]]
d_phi = phi[0] - phi[1]
d_rho = rho[0] - rho[1]
m_rho = (1 / rho[0] + 1 / rho[1]) / 2.0
gamma = self.P * (QtoP * d_rho ** 2 - d_phi ** 2 - self.RtoP) / m_rho
return int(round(gamma))
def H_form_ord(self):
"""Calculate the enthalpy of formation for an ordered compound of elements A
and B with a composition xB of element B."""
vol0_A = self.A[2]
vol0_B = self.B[2]
phi = [self.A[0], self.B[0]]
htrans = [self.A[7], self.B[7]]
# Determine volume scale parameter a.
# Calculate surface concentrations using original volumes.
c_S_A = (1 - self.x) * vol0_A / ((1 - self.x) * vol0_A + self.x * vol0_B)
c_S_B = self.x * vol0_B / ((1 - self.x) * vol0_A + self.x * vol0_B)
# Calculate surface fractions for ordered compounds using original volumes.
f_BA = c_S_B * (1 + 8 * (c_S_A * c_S_B) ** 2)
f_AB = c_S_A * (1 + 8 * (c_S_A * c_S_B) ** 2)
# Calculate new volumes using surface fractions (which use original
# volumes).
vol_A = vol0_A * (1 + self.a_A * f_BA * (phi[0] - phi[1]))
vol_B = vol0_B * (1 + self.a_B * f_AB * (phi[1] - phi[0]))
# Recalculate surface concentrations using new volumes.
c_S_A = (1 - self.x) * vol_A / ((1 - self.x) * vol_A + self.x * vol_B)
c_S_B = self.x * vol_B / ((1 - self.x) * vol_A + self.x * vol_B)
# Recalculate surface fractions for ordered compounds using new volumes.
f_BA = c_S_B * (1 + 8 * (c_S_A * c_S_B) ** 2)
f_AB = c_S_A * (1 + 8 * (c_S_A * c_S_B) ** 2)
D_htrans = self.x * htrans[1] + (1 - self.x) * htrans[0]
H_ord = (
self.gamma
* (1 - self.x)
* self.x
* vol_A
* vol_B
* (1 + 8 * (c_S_A * c_S_B) ** 2)
/ ((1 - self.x) * vol_A + self.x * vol_B)
+ D_htrans
)
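        # The factor 0.01036427 appears to convert kJ/mol to eV/atom
        # (1 eV/atom ~= 96.485 kJ/mol and 1/96.485 ~= 0.0103643).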
return round(H_ord * 0.01036427, 2)
@staticmethod
def get(composition):
return Miedema(composition).energy
|
|
"""Artnotizen organizes all dated files in a given directory in year/month/day
folders, compiles are markdown files to HTML, and constructs an easy to navigate
single-page index in index.html."""
from collections import OrderedDict
from datetime import datetime
import os
import pipes
import re
import shlex
import subprocess
import sys
import time
import urllib
import urlparse
from jinja2 import Environment, PackageLoader, FileSystemLoader
_GENFILES_REGEXP = re.compile(r"(index\.html$)|(lib/.*)$")
_DATE_REGEXP = re.compile(r"^(\d{4})(\d{2})?(\d{2})?.*")
_DATE_PATH_REGEXP = re.compile(r"(\d{4})/(\d{2})?/?(\d{2})?/?.*")
_LIBRARY_URLS = [
"http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js",
"http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.3/jquery-ui.min.js",
]
def is_hidden(path):
"""Check whether given path is hidden.
Only works for POSIX, checks for basename starting with '.'.
Args:
path: path to check for hidden status, absolute or relative
Returns:
Boolean hidden status
"""
return os.path.basename(os.path.abspath(path))[0] == "."
def listfiles(path, hidden=False):
"""Recursively list all files below a given path. Mimics find -type f.
Args:
        path: path to search for files
        hidden: if True, include hidden files and directories (default False)
Returns:
Flat list of files relative to path
"""
all_files = []
for root, dirnames, files in os.walk(path, topdown=True):
if not hidden:
if is_hidden(root):
# dirnames must be modified in-place to affect subsequent
# iterations
dirnames[:] = []
continue
files = [f for f in files if not is_hidden(f)]
all_files.extend([os.path.join(root, f) for f in files])
return all_files
def organize_notes(directory):
"""Organize notes into year/month/day folders in the given directory.
Filenames are preserved, and only files matching ^\\d{4}(\\d{2})?(\\d{2})?.*
are affected.
Args:
directory: string to current working directory
Returns:
A list of all matching dated notes and a list of other files
"""
all_files = listfiles(directory)
notes = [f for f in all_files
if _DATE_REGEXP.match(os.path.basename(f))]
    others = [os.path.join(directory, f) for f in all_files
              if f not in notes and not _GENFILES_REGEXP.match(f)]
out = []
for note in notes:
year, month, day = _DATE_REGEXP.match(os.path.basename(note)).groups("")
note_dir = os.path.join(directory, year, month, day)
dst = os.path.join(note_dir, os.path.basename(note))
if note != dst:
# Handles directory creation and file move
os.renames(note, dst)
out.append(dst)
return out, others
def wait_for_all(running, delay, callback=None):
"""Poll all processes in running at interval delay until all are complete.
WARNING: This function modifies running in-place. Any further processing of
its processes should be handled using the callback argument.
Args:
running: dictionary with subprocess.Popen values.
delay: polling interval in seconds.
callback: optional function of (key, proc) to be called on the
completion of proc if proc has a return code of 0.
Returns:
None on completion of all processes.
"""
while running:
for key, proc in running.iteritems():
retcode = proc.poll()
if retcode is not None:
if retcode != 0:
print >> sys.stderr, "{} returned with value {}".format(
key, retcode)
elif callback is not None:
callback(key, proc)
del running[key]
break
else:
time.sleep(delay)
continue
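def _example_wait_for_all(delay=0.1):
    """A minimal usage sketch, not part of the original module: run two
    hypothetical shell commands and print the key of each one that exits
    with return code 0."""
    running = {
        "sleep 1": subprocess.Popen(["sleep", "1"]),
        "sleep 2": subprocess.Popen(["sleep", "2"]),
    }
    wait_for_all(running, delay,
                 callback=lambda key, proc: sys.stdout.write(key + "\n"))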
def compile_markdown(files, markdown_ext, markdown_cmd, delay=0.1):
"""Select and compile markdown files from files to HTML.
Args:
        files: list of files to filter and compile
        markdown_ext: file extension for markdown files
        markdown_cmd: command to compile markdown to html. Must write to stdout.
        delay: polling interval in seconds for launched compilation processes.
Returns:
A list of the same length as files with the markdown filenames replaced
by the corresponding html files.
"""
md_files = [f for f in files if os.path.splitext(f)[1] == markdown_ext]
out = files[:]
cmd_args = shlex.split(markdown_cmd)
running = {}
for mkd in md_files:
html_filename = os.path.splitext(mkd)[0] + ".html"
with open(html_filename, "wb") as outfile:
args = cmd_args + [pipes.quote(mkd)]
running[" ".join(args)] = subprocess.Popen(args, stdout=outfile)
out[out.index(mkd)] = html_filename
# Poll compilation processes until all complete
wait_for_all(running, delay)
return out
class _Note(object):
"""_Note stores information regarding a particular note.
Attributes:
name: String name of the note, often a filename
path: String path to the note relative to index.html
"""
def __init__(self, path, name):
self.name = name
self.path = path
class _Group(object):
"""_Group stores groups of notes, possibly with child groups.
Attributes:
key: A key for sorting
identifier: Separate identifier for HTML elements
notes: A list of _Note's
children: A dictionary of child _Group's
"""
def __init__(self, key="", identifier=None, notes=None, children=None):
self.key = key
if identifier is None:
identifier = key
if notes is None:
notes = []
if children is None:
children = {}
self.identifier = identifier
self.notes = notes
self.children = children
def sort(self):
"""Sort children by key and notes by name.
Converts children to an OrderedDict."""
self.children = OrderedDict(sorted(self.children.items(),
key=lambda t: t[1].key))
self.notes.sort(key=lambda x: x.name)
def index_data(notes, directory, depth):
"""Extract index data from list of paths to note files.
"""
groups = {"Other notes": _Group("zzzz", "other")}
for note in notes:
info = _Note(name=os.path.basename(note),
path=os.path.relpath(note, directory))
match = _DATE_PATH_REGEXP.search(note)
if not match:
groups["Other notes"].notes.append(info)
continue
        year, month, day = match.groups("")
        if year not in groups:
            groups[year] = _Group(year)
        if depth == "year" or month == "" or (
                day == "" and depth == "week"):
            groups[year].notes.append(info)
            continue
        # Only build a real date once year and month are known; default a
        # missing day so strptime does not fail at depth "month".
        date = datetime.strptime(year + month + (day or "01"), "%Y%m%d")
if depth == "month":
nice_month = date.strftime("%B")
if nice_month not in groups[year].children:
groups[year].children[nice_month] = _Group(month, year + month)
groups[year].children[nice_month].notes.append(info)
continue
if depth == "week":
week = str(date.isocalendar()[1])
if week not in groups[year].children:
groups[year].children[week] = _Group(int(week), year + week)
groups[year].children[week].notes.append(info)
continue
groups = OrderedDict(sorted(groups.items(), key=lambda t: t[1].key,
reverse=True))
for key in groups:
groups[key].sort()
return groups
def download_libraries(library_urls, directory):
"""Download libraries from CDN as needed
Downloads libraries provided in library_urls to {directory}/lib. Does not
overwrite existing libraries with the same filenames.
Returns a list of library paths relative to directory.
"""
lib_dir = os.path.join(directory, "lib")
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
libraries = []
for url in library_urls:
filename = os.path.basename(urlparse.urlparse(url)[2])
out = os.path.join(lib_dir, filename)
libraries.append(out)
if not os.path.exists(out):
urllib.urlretrieve(url, out)
return libraries
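# Hedged usage sketch (the output directory name is an assumption): fetch the
# configured CDN libraries into <directory>/lib, reusing already-downloaded
# copies on later runs.
def _example_download_libraries():
    return download_libraries(_LIBRARY_URLS, "site")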
def build_index(notes, others, directory, template_path, depth):
"""Build HTML index of notes.
Notes in year/[month/[day/]] folders are placed under appropriate headings.
Other notes are organized in lexicographic order.
"""
if os.path.exists(template_path):
env = Environment(loader=FileSystemLoader(template_path))
else:
env = Environment(loader=PackageLoader("artnotizen"))
libraries = download_libraries(_LIBRARY_URLS, directory)
env.globals = {
"notes": index_data(set(notes + others), directory, depth),
"libraries": libraries,
}
template = env.get_template("index.html")
with open(os.path.join(directory, "index.html"), "wb") as indexfile:
print >> indexfile, template.render()
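# Hedged usage sketch (all paths are assumptions): render an index.html for a
# notes directory, grouping dated notes down to month granularity and falling
# back to the packaged template when no template directory exists.
def _example_build_index():
    build_index(notes=["site/2014/01/02/log.html"],
                others=["site/readme.html"],
                directory="site",
                template_path="templates",
                depth="month")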
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet.event
import eventlet.queue
import eventlet.timeout
import mock
import testtools
from neutron.agent.linux import async_process
from neutron.agent.linux import utils
from neutron.tests import base
_marker = ()
class TestAsyncProcess(base.BaseTestCase):
def setUp(self):
super(TestAsyncProcess, self).setUp()
self.proc = async_process.AsyncProcess(['fake'])
    def test_constructor_raises_exception_for_negative_respawn_interval(self):
with testtools.ExpectedException(ValueError):
async_process.AsyncProcess(['fake'], respawn_interval=-1)
def test__spawn(self):
expected_process = 'Foo'
proc = self.proc
with mock.patch.object(utils, 'create_process') as mock_create_process:
mock_create_process.return_value = [expected_process, None]
with mock.patch('eventlet.spawn') as mock_spawn:
proc._spawn()
self.assertIsInstance(proc._kill_event, eventlet.event.Event)
self.assertEqual(proc._process, expected_process)
mock_spawn.assert_has_calls([
mock.call(proc._watch_process,
proc._read_stdout,
proc._kill_event),
mock.call(proc._watch_process,
proc._read_stderr,
proc._kill_event),
])
self.assertEqual(len(proc._watchers), 2)
    def test__handle_process_error_kills_without_respawn(self):
        with mock.patch.object(self.proc, '_kill') as kill:
            self.proc._handle_process_error()
            kill.assert_has_calls([mock.call(respawning=False)])
    def test__handle_process_error_kills_with_respawn(self):
        self.proc.respawn_interval = 1
        with mock.patch.object(self.proc, '_kill') as kill:
            with mock.patch.object(self.proc, '_spawn') as spawn:
                with mock.patch('eventlet.sleep') as sleep:
                    self.proc._handle_process_error()
                    kill.assert_has_calls([mock.call(respawning=True)])
                    sleep.assert_has_calls([mock.call(self.proc.respawn_interval)])
                    spawn.assert_called_once_with()
def _test__watch_process(self, callback, kill_event):
self.proc._kill_event = kill_event
# Ensure the test times out eventually if the watcher loops endlessly
with eventlet.timeout.Timeout(5):
with mock.patch.object(self.proc,
'_handle_process_error') as func:
self.proc._watch_process(callback, kill_event)
if not kill_event.ready():
            func.assert_called_once_with()
def test__watch_process_exits_on_callback_failure(self):
self._test__watch_process(lambda: False, eventlet.event.Event())
def test__watch_process_exits_on_exception(self):
def foo():
raise Exception('Error!')
self._test__watch_process(foo, eventlet.event.Event())
def test__watch_process_exits_on_sent_kill_event(self):
kill_event = eventlet.event.Event()
kill_event.send()
self._test__watch_process(None, kill_event)
def _test_read_output_queues_and_returns_result(self, output):
queue = eventlet.queue.LightQueue()
mock_stream = mock.Mock()
with mock.patch.object(mock_stream, 'readline') as mock_readline:
mock_readline.return_value = output
result = self.proc._read(mock_stream, queue)
if output:
self.assertEqual(output, result)
self.assertEqual(output, queue.get_nowait())
else:
self.assertFalse(result)
self.assertTrue(queue.empty())
def test__read_queues_and_returns_output(self):
self._test_read_output_queues_and_returns_result('foo')
def test__read_returns_none_for_missing_output(self):
self._test_read_output_queues_and_returns_result('')
def test_start_raises_exception_if_process_already_started(self):
self.proc._kill_event = True
with testtools.ExpectedException(async_process.AsyncProcessException):
self.proc.start()
def test_start_invokes__spawn(self):
with mock.patch.object(self.proc, '_spawn') as mock_start:
self.proc.start()
            mock_start.assert_called_once_with()
def test__iter_queue_returns_empty_list_for_empty_queue(self):
result = list(self.proc._iter_queue(eventlet.queue.LightQueue()))
self.assertEqual(result, [])
def test__iter_queue_returns_queued_data(self):
queue = eventlet.queue.LightQueue()
queue.put('foo')
result = list(self.proc._iter_queue(queue))
self.assertEqual(result, ['foo'])
def _test_iter_output_calls_iter_queue_on_output_queue(self, output_type):
expected_value = 'foo'
with mock.patch.object(self.proc, '_iter_queue') as mock_iter_queue:
mock_iter_queue.return_value = expected_value
target_func = getattr(self.proc, 'iter_%s' % output_type, None)
value = target_func()
self.assertEqual(value, expected_value)
queue = getattr(self.proc, '_%s_lines' % output_type, None)
mock_iter_queue.assert_called_with(queue)
def test_iter_stdout(self):
self._test_iter_output_calls_iter_queue_on_output_queue('stdout')
def test_iter_stderr(self):
self._test_iter_output_calls_iter_queue_on_output_queue('stderr')
def _test__kill(self, respawning, pid=None):
with mock.patch.object(self.proc, '_kill_event') as mock_kill_event:
with mock.patch.object(self.proc, '_get_pid_to_kill',
return_value=pid):
with mock.patch.object(self.proc,
'_kill_process') as mock_kill_process:
self.proc._kill(respawning)
if respawning:
self.assertIsNotNone(self.proc._kill_event)
else:
self.assertIsNone(self.proc._kill_event)
                    mock_kill_event.send.assert_called_once_with()
                    if pid:
                        mock_kill_process.assert_called_once_with(pid)
def test__kill_when_respawning_does_not_clear_kill_event(self):
self._test__kill(True)
def test__kill_when_not_respawning_clears_kill_event(self):
self._test__kill(False)
def test__kill_targets_process_for_pid(self):
self._test__kill(False, pid='1')
def _test__get_pid_to_kill(self, expected=_marker,
root_helper=None, pids=None):
def _find_child_pids(x):
if not pids:
return []
pids.pop(0)
return pids
if root_helper:
self.proc.root_helper = root_helper
with mock.patch.object(self.proc, '_process') as mock_process:
with mock.patch.object(mock_process, 'pid') as mock_pid:
with mock.patch.object(utils, 'find_child_pids',
side_effect=_find_child_pids):
actual = self.proc._get_pid_to_kill()
if expected is _marker:
expected = mock_pid
self.assertEqual(expected, actual)
def test__get_pid_to_kill_returns_process_pid_without_root_helper(self):
self._test__get_pid_to_kill()
def test__get_pid_to_kill_returns_child_pid_with_root_helper(self):
self._test__get_pid_to_kill(expected='2', pids=['1', '2'],
root_helper='a')
    def test__get_pid_to_kill_returns_last_child_pid_with_root_helper(self):
self._test__get_pid_to_kill(expected='3', pids=['1', '2', '3'],
root_helper='a')
def test__get_pid_to_kill_returns_none_with_root_helper(self):
self._test__get_pid_to_kill(expected=None, root_helper='a')
def _test__kill_process(self, pid, expected, exception_message=None):
self.proc.root_helper = 'foo'
if exception_message:
exc = RuntimeError(exception_message)
else:
exc = None
with mock.patch.object(utils, 'execute',
side_effect=exc) as mock_execute:
actual = self.proc._kill_process(pid)
self.assertEqual(expected, actual)
mock_execute.assert_called_with(['kill', '-9', pid],
root_helper=self.proc.root_helper)
def test__kill_process_returns_true_for_valid_pid(self):
self._test__kill_process('1', True)
def test__kill_process_returns_true_for_stale_pid(self):
self._test__kill_process('1', True, 'No such process')
def test__kill_process_returns_false_for_execute_exception(self):
self._test__kill_process('1', False, 'Invalid')
def test_stop_calls_kill(self):
self.proc._kill_event = True
with mock.patch.object(self.proc, '_kill') as mock_kill:
self.proc.stop()
mock_kill.assert_called_once_with()
def test_stop_raises_exception_if_already_started(self):
with testtools.ExpectedException(async_process.AsyncProcessException):
self.proc.stop()
|
|
#!/usr/bin/env python2
# Copyright (c) 2014 The BeCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BeCoinTestFramework
from test_framework.util import *
import os.path
import time
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f))/(1024*1024)
class PruneTest(BeCoinTestFramework):
def __init__(self):
self.utxo = []
self.address = ["",""]
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions and full blocks to fill up our block files
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
self.txouts = "81"
for k in xrange(128):
# add txout value
self.txouts = self.txouts + "0000000000000000"
# add length of script_pubkey
self.txouts = self.txouts + "fd0402"
# add script_pubkey
self.txouts = self.txouts + script_pubkey
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
self.address[0] = self.nodes[0].getnewaddress()
self.address[1] = self.nodes[1].getnewaddress()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MB of data
for i in xrange(645):
self.mine_full_block(self.nodes[0], self.address[0])
sync_blocks(self.nodes[0:3])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
print "Success"
print "Though we're already using more than 550MB, current usage:", calc_usage(self.prunedir)
print "Mining 25 more blocks should cause the first block file to be pruned"
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in xrange(25):
self.mine_full_block(self.nodes[0],self.address[0])
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 10:
raise AssertionError("blk00000.dat not pruned when it should be")
print "Success"
usage = calc_usage(self.prunedir)
print "Usage should be below target:", usage
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
print "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"
for j in xrange(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
self.utxo = self.nodes[1].listunspent()
for i in xrange(24):
if j == 0:
self.mine_full_block(self.nodes[1],self.address[1])
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
self.utxo = self.nodes[0].listunspent()
for i in xrange(25):
self.mine_full_block(self.nodes[0],self.address[0])
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
print "Usage can be over target because of high stale rate:", calc_usage(self.prunedir)
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
stop_node(self.nodes[1],1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
print "Current block height:", height
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
print "Invalidating block at height:",invalidheight,badhash
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
print "New best height", self.nodes[1].getblockcount()
# Reboot node1 to clear those giant tx's from mempool
stop_node(self.nodes[1],1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
print "Generating new longer chain of 300 more blocks"
self.nodes[1].generate(300)
print "Reconnect nodes"
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3])
print "Verify height on node 2:",self.nodes[2].getblockcount()
print "Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)
print "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3])
usage = calc_usage(self.prunedir)
print "Usage should be below target:", usage
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
try:
self.nodes[2].getblock(self.forkhash)
raise AssertionError("Old block wasn't pruned so can't test redownload")
except JSONRPCException as e:
print "Will need to redownload block",self.forkheight
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
print "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
print "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload"
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def mine_full_block(self, node, address):
# Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
            # Create a basic transaction that will send change back to ourselves after accounting for a fee.
            # Then insert the 128 generated transaction outs in the middle; rawtx[92] is where the number
            # of txouts is stored and is the only thing we overwrite from the original transaction.
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
print "Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)"
print "Mining a big blockchain of 995 blocks"
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
print "Check that we haven't started pruning yet because we're below PruneAfterHeight"
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
print "Check that we'll exceed disk space target if we have a very high stale block rate"
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
print "Check that we can survive a 288 block reorg still"
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
print "Test that we can rerequest a block we previously pruned if needed for a reorg"
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
print "Done"
if __name__ == '__main__':
PruneTest().main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_node import Parameters
from library.modules.bigip_node import ApiParameters
from library.modules.bigip_node import ModuleManager
from library.modules.bigip_node import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_node import Parameters
from ansible.modules.network.f5.bigip_node import ApiParameters
from ansible.modules.network.f5.bigip_node import ModuleManager
from ansible.modules.network.f5.bigip_node import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
host='10.20.30.40',
name='10.20.30.40'
)
p = Parameters(params=args)
assert p.host == '10.20.30.40'
assert p.name == '10.20.30.40'
def test_api_parameters(self):
args = load_fixture('load_ltm_node_1.json')
p = Parameters(params=args)
assert p.address == '1.2.3.4'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_node(self, *args):
set_module_args(dict(
host='10.20.30.40',
name='mytestserver',
monitors=[
'/Common/icmp'
],
partition='Common',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_create_node_idempotent(self, *args):
set_module_args(dict(
host='10.20.30.40',
name='mytestserver',
monitors=[
'/Common/icmp'
],
partition='Common',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_ltm_node_3.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[True, True])
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is False
def test_create_node_fqdn(self, *args):
set_module_args(dict(
fqdn='foo.bar',
name='mytestserver',
monitors=[
'/Common/icmp'
],
partition='Common',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_update_node_fqdn_up_interval(self, *args):
set_module_args(dict(
fqdn='foo.bar',
fqdn_up_interval=100,
name='mytestserver',
monitors=[
'/Common/icmp'
],
partition='Common',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_ltm_node_2.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[True, True])
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
def test_update_node_fqdn_up_interval_idempotent(self, *args):
set_module_args(dict(
fqdn='google.com',
fqdn_up_interval=3600,
name='fqdn-foo',
monitors=[
'icmp',
'tcp_echo'
],
partition='Common',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_ltm_node_2.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[True, True])
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is not True
|
|
#!/usr/bin/python
"""
This is a Python script for the Lastline Analyst Connector for Bit9 Security Platform.
Copyright Bit9, Inc. 2015
support@bit9.com
Disclaimer
+++++++++++++++++++
By accessing and/or using the samples scripts provided on this site (the Scripts), you hereby agree to the following terms:
The Scripts are exemplars provided for purposes of illustration only and are not intended to represent specific
recommendations or solutions for API integration activities as use cases can vary widely.
THE SCRIPTS ARE PROVIDED AS IS WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED. BIT9 MAKES NO REPRESENTATION
OR OTHER AFFIRMATION OF FACT, INCLUDING BUT NOT LIMITED TO STATEMENTS REGARDING THE SCRIPTS SUITABILITY FOR USE OR PERFORMANCE.
IN NO EVENT SHALL BIT9 BE LIABLE FOR SPECIAL, INCIDENTAL, CONSEQUENTIAL, EXEMPLARY OR OTHER INDIRECT DAMAGES OR FOR DIRECT
DAMAGES ARISING OUT OF OR RESULTING FROM YOUR ACCESS OR USE OF THE SCRIPTS, EVEN IF BIT9 IS ADVISED OF OR AWARE OF THE
POSSIBILITY OF SUCH DAMAGES.
Requirements
+++++++++++++++++++
- Python 2.6 or 2.7
- Bit9 API client (included) which requires requests Python module
- Lastline Analysis API client (available at https://analysis.lastline.com/docs/llapi_client/analysis_apiclient.py)
- Lastline API Key
- Lastline API Token
- Bit9 Platform Server 7.2.1 or better
- Bit9 API Token (generated in Bit9 Console)
- Bit9 RBAC permission enabled for 'Extend connectors through API'
Required python modules can be installed using tools such as easy_install or pip, e.g.
easy_install requests
Configuring Connector
+++++++++++++++++++++++
Please update the script with appropriate LL_API_KEY, LL_API_TOKEN, B9_API_TOKEN, B9_SERVER with your Lastline and B9 API credentials.
By default, the client connects to an API instance running in the Lastline cloud at https://analysis.lastline.com
Starting Connector
+++++++++++++++++++++++
Start the script. No parameters are required. Debug and error logs will be created in the script folder.
"""
import os
import sys
import datetime
import time
import logging
import json
import analysis_apiclient
import bit9api
# Lastline API parameters
class LastlineAPI:
def __init__(self, url, key, token, strong_cert, delete_after_analysis = True):
self.url = url
self.key = key
self.token = token
self.strong_cert = strong_cert # should cert be validated
self.delete_after_analysis = delete_after_analysis # should file be deleted after analysis
# B9 Connector for Lastline
class LastlineConnector:
def __init__(self, b9_api, ll_api, download_file_location, polling_period = 30, report_store_location = False, debug_log_filename = "lastline_debug.log", error_log_filename = "lastline_error.log"):
self.b9_api = b9_api
self.ll_api = ll_api
self.polling_period = polling_period
self.download_file_location = download_file_location
self.report_store_location = report_store_location
self.debug_log_filename = debug_log_filename
self.error_log_filename = error_log_filename
# Dictionary to map B9 pending analysis requests to Lastline's tasks
# Any pending requests and finished results will be kept here, together with Lastline uuid
self.bit9_pending_analysis = {}
# Dictionary to track LL tasks waiting for completion
# Any pending results will be kept here, together with their status
self.ll_tasks = {}
# Track when we last checked for completed tasks
self.last_checked_time = datetime.datetime.utcnow() - datetime.timedelta(days=1)
def start(self):
self.init_logging()
try:
logging.info("*** LL script starting")
self.ll = analysis_apiclient.AnalysisClient(self.ll_api.url, key=self.ll_api.key, api_token=self.ll_api.token)
logging.info("Connected to Lastline API [%s]" % self.ll_api.url)
# Register or update our connector
r = self.b9_api.create('v1/connector', {'name': 'Lastline', 'analysisName': 'Lastline',
'connectorVersion': '1.0', 'canAnalyze': 'true', 'analysisEnabled': 'true'})
connectorId = str(r['id'])
logging.info("Connected to B9 API [%s]" % self.b9_api.server)
except Exception as ex:
logging.error(ex)
return
# Loop forever (until killed)
while True:
try:
# Check with LL for any pending tasks
self.fetchCompletedTasks()
# Check with Bit9 Platform if we have any analysis still pending
for i in self.b9_api.retrieve("v1/pendingAnalysis", url_params="connectorId=" + connectorId):
# Process all B9 pending analysis requests for LL
self.processOneAnalysisRequest(i)
except:
logging.error(sys.exc_info()[0])
logging.error("*** Exception processing requests. Will try again in %d seconds." % self.polling_period)
time.sleep(self.polling_period)
return
def init_logging(self):
logger = logging.getLogger("analysis")
logger.setLevel(logging.DEBUG)
#clean up any pre-existing handlers!
handlers = [h for h in logger.handlers]
for h in handlers:
logger.removeHandler(h)
#create console handler and set log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
#create file handler and set log level
        #log records are appended to the existing debug log across runs
fh = logging.FileHandler(self.debug_log_filename, 'a')
fh.setLevel(logging.DEBUG)
#create file handler for the error log
        #plain (non-rotating) handler; errors are appended to the existing error log
eh = logging.FileHandler(self.error_log_filename, 'a')
eh.setLevel(logging.ERROR)
#create formatter
console_formatter = logging.Formatter("%(message)s")
file_formatter = logging.Formatter("%(asctime)s - %(module)s(%(lineno)d) - %(message)s")
#add formatter to ch and fh
ch.setFormatter(console_formatter)
fh.setFormatter(file_formatter)
eh.setFormatter(file_formatter)
#add ch and fh to logger
logger.addHandler(ch)
logger.addHandler(fh)
logger.addHandler(eh)
logging.root = logger
def uploadFileToLL(self,pa):
uuid = None
isError = False
file = None
downloaded = False
try:
fileName = pa['fileName'].strip()
if self.download_file_location is not None:
# This is if we want to locally download file from Bit9
# (in the case shared folder is not accessible)
localFilePath = self.download_file_location + pa['fileName']
self.b9_api.retrieve_analyzed_file(pa['id'], localFilePath)
logging.debug("Downloaded file '%s'" % localFilePath)
downloaded = True
else:
# Easier option, if Bit9 shared folder can be accessed directly
localFilePath = pa['uploadPath']
file = open(localFilePath, 'rb')
logging.debug("Submitting '%s' to LL for analysis." % localFilePath)
submit_result = self.ll.submit_file(file, filename=fileName, verify=self.ll_api.strong_cert, delete_after_analysis=self.ll_api.delete_after_analysis)
logging.debug("Submit result: %s" % str(submit_result)[:1024])
result_data = submit_result.get('data', {})
# we got LL uuid. We will need it to check status of the scan at later time
uuid = result_data['task_uuid']
# Tell Bit9 that we are waiting for the scan to finish
pa['analysisStatus'] = 1 # (status: Analyzing)
pa['analysisResult'] = 0 # (status: Unknown)
# Update Bit9 status for this file
self.b9_api.update('v1/pendingAnalysis', pa)
finally:
            # Close the file handle and, if we downloaded a local copy, remove it while ignoring errors
if file != None:
file.close()
if (downloaded):
try:
os.remove(localFilePath)
logging.debug("Removed downloaded file '%s'" % localFilePath)
except OSError:
pass
return uuid
def reportResultToBit9(self, pa, scanResults):
# We have results. Create our Bit9 notification
fileAnalysisId = pa['id']
md5 = pa['md5']
sha1 = pa['sha1']
fileName = pa['fileName']
notification = {
'fileAnalysisId': fileAnalysisId,
'product': 'Lastline',
'appliance': self.ll_api.url.replace("https://", "")
}
if 'https://analysis.lastline.com' not in self.ll_api.url:
externalUrl = '%s/malscape/#/task/%s' % ( self.ll_api.url, scanResults['task_uuid'])
notification['externalUrl'] = externalUrl
else:
notification['appliance'] = notification['appliance'].replace("analysis.lastline.com", "user.lastline.com")
if 'malicious_activity' in scanResults:
notification['anomaly'] = ', '.join(scanResults['malicious_activity'])
# Let's see if it is malicious. Use some fancy heuristics...
positivesPerc = scanResults['score']
if positivesPerc > 50:
notification['analysisResult'] = 3 # ...malicious
            notification['severity'] = 'critical'
            notification['type'] = 'malicious_file'
elif positivesPerc > 0:
notification['analysisResult'] = 2 # ...could be malicious
            notification['severity'] = 'high'
            notification['type'] = 'potential_risk_file'
else:
notification['analysisResult'] = 1 # clean!
            notification['severity'] = 'low'
            notification['type'] = 'clean_file'
files = []
if ('report' in scanResults):
report = scanResults['report']
if 'overview' in report:
if 'analysis_engine_version' in report['overview']:
notification['version'] = report['overview']['analysis_engine_version']
if 'analysis_engine' in report['overview']:
notification['targetOS'] = report['overview']['analysis_engine']
if ('analysis_metadata' in report):
for element in report['analysis_metadata']:
if 'metadata_type' in element and 'generated_file' == element['metadata_type']:
if 'file' in element:
writtenFile = element['file']
file = {}
if 'filename' in writtenFile:
file['fileName'] = os.path.basename(writtenFile['filename'])
file['filePath'] = os.path.dirname(writtenFile['filename'])
if 'ext_info' in writtenFile:
if 'sha1' in writtenFile['ext_info']:
file['sha1'] = writtenFile['ext_info']['sha1']
if 'md5' in writtenFile['ext_info']:
file['md5'] = writtenFile['ext_info']['md5']
if 'size' in writtenFile['ext_info']:
file['fileSize'] = writtenFile['ext_info']['size']
file['operation'] = 'created'
files.append(file)
if len(files) > 0:
file = { 'fileName' : os.path.basename(fileName),
'filePath' : os.path.dirname(fileName),
'md5' : md5,
'sha1' : sha1,
'operation' : 'created'
}
files.insert(0, file)
notification['files'] = files
self.b9_api.create("v1/notification", notification)
logging.debug("File analysis completed for '%s' [%s]: %s" % (fileName, md5, notification['type']))
def fetchTaskResult(self, uuid, fileName):
logging.debug("Querying LL for json result for '%s'" % uuid)
json_result = self.ll.get_result(uuid, raw=True)
logging.debug("Query result: %s" % str(json_result)[:1024])
result = json.loads(json_result)
success = result['success']
if not success:
logging.error("\t%s", result)
return False
if self.report_store_location:
result_filename = os.path.join(self.report_store_location, os.path.basename(fileName))
json_result = json.dumps(result, sort_keys=True, indent=4)
json_fn = result_filename + ".json"
            with open(json_fn, "w") as f:
                f.write(json_result)
#first one (in json) was successful.
#Now let's get it in raw XML.
logging.debug("Querying LL for xml result for '%s'" % uuid)
xml_result = self.ll.get_result(uuid, requested_format="xml")
xml_fn = result_filename + ".xml"
            with open(xml_fn, "w") as f:
                f.write(xml_result)
return result
def fetchCompletedTasks(self):
try:
waitingScans = sum(1 for x in self.ll_tasks.values() if x == "pending")
if (waitingScans > 0):
moreResults = 1
while (moreResults == 1):
logging.debug("Querying LL for completed tasks from %s" % str(self.last_checked_time))
result = self.ll.completed(after=self.last_checked_time, verify=self.ll_api.strong_cert)
logging.debug("Query result: %s" % result)
completed_tasks = result["data"]["tasks"]
self.last_checked_time = result["data"]["before"]
moreResults = result["data"]["more_results_available"]
completedCount = 0
if len(completed_tasks) > 0:
for uuid in completed_tasks:
if uuid in self.ll_tasks.keys():
self.ll_tasks[uuid] = "completed"
completedCount += 1
if completedCount > 0:
logging.debug("Got %s completed tasks", completedCount)
except Exception as e:
logging.error(e)
return
def processOneAnalysisRequest(self,pa):
try:
# Use md5 hash if we have one
md5 = pa['md5'].strip()
fileName = pa['fileName'].strip()
uuid = None
# Check our cache if we already sent this file for scan
if md5 in self.bit9_pending_analysis.keys():
uuid = self.bit9_pending_analysis[md5]
if uuid in self.ll_tasks:
task = self.ll_tasks[uuid]
if task == "completed":
# Get our uuid we got from LL last time around
result = self.fetchTaskResult(uuid, fileName)
if result == False:
raise Exception("Error: Result not available")
self.reportResultToBit9(pa, result['data'])
del self.ll_tasks[uuid]
del self.bit9_pending_analysis[md5]
return
else:
# Still waiting for a completed result
return
else:
# we have not asked LL yet. Try with file hash
try:
logging.debug("File analysis requested for '%s' [%s]" % (fileName, md5))
result = self.ll.submit_file_hash(md5=md5, filename=fileName, verify=self.ll_api.strong_cert)
logging.debug("Query result: %s" % str(result)[:1024])
if 'error' in result.get('data', {}):
raise Exception(result['data']['error'])
# LL task available
if 'task_uuid' in result.get('data', {}):
                        uuid = result['data']['task_uuid']
# LL result available
if 'score' in result.get('data', {}):
result = self.fetchTaskResult(uuid, fileName)
if result == False:
raise Exception("Error: Result not available")
self.reportResultToBit9(pa, result['data'])
if uuid in self.ll_tasks:
del self.ll_tasks[uuid]
return
except analysis_apiclient.FileNotAvailableError:
# file has not already been submitted to the device, need to submit
if pa['uploaded'] == 1:
# We have file and now we will upload it to LL
uuid = self.uploadFileToLL(pa)
else:
# if we end here, it means that LL doesn't have file, and Bit9 hasn't uploaded it yet from the agent
# we will come back again in 30 seconds
uuid = None
if uuid is not None:
                # Remember uuid since LL wants us to use it for future references to the file
# We will try again in 1 hour (per LL best practices)
self.bit9_pending_analysis[md5] = uuid
self.ll_tasks[uuid] = "pending"
except Exception as ex:
logging.error(ex)
# Report to Bit9 that we had error analyzing this file. This means we will not try analysis again.
pa['analysisStatus'] = 4 # (status: Error)
pa['analysisError'] = 'Lastline %s' % str(ex)
# Update Bit9 status for this file
self.b9_api.update('v1/pendingAnalysis', pa)
# -------------------------------------------------------------------------------------------------
# Main body of the script
b9_api = bit9api.bit9Api(
server = 'https://B9_SERVER',
    ssl_verify = False,  # set to True to validate the server certificate against a CA
token = 'B9_API_TOKEN' # Need to add B9 API token here
)
ll_api = LastlineAPI(
url = 'https://analysis.lastline.com',
key = 'LL_API_KEY', # Need to add Lastline API key here
token = 'LL_API_TOKEN', # Need to add Lastline API token here
    strong_cert = False)  # set to True to validate the Lastline certificate against a CA
# Need to specify an existing accessible path here (such as c:\\test\\)
connector = LastlineConnector(b9_api, ll_api, download_file_location="c:\\test\\")
connector.start()
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import pandas as pd
from . import dtypes
from . import duck_array_ops
from . import nputils
from . import ops
from .combine import concat
from .common import (
ImplementsArrayReduce, ImplementsDatasetReduce,
)
from .pycompat import range, zip, integer_types
from .utils import hashable, peek_at, maybe_wrap_array, safe_cast_to_index
from .variable import as_variable, Variable, IndexVariable
def unique_value_groups(ar, sort=True):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
sort : boolean, optional
Whether or not to sort unique values.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
inverse, values = pd.factorize(ar, sort=sort)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
if g >= 0:
# pandas uses -1 to mark NaN, but doesn't include them in values
groups[g].append(n)
return values, groups
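# Hedged example (illustrative sketch, not part of the original module): show
# how unique_value_groups() maps each unique value to the positions at which
# it occurs in the input.
def _example_unique_value_groups():
    values, groups = unique_value_groups(np.array(['a', 'b', 'a', 'c']))
    # values -> array(['a', 'b', 'c'], dtype=object); groups -> [[0, 2], [1], [3]]
    return values, groups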
def _dummy_copy(xarray_obj):
from .dataset import Dataset
from .dataarray import DataArray
if isinstance(xarray_obj, Dataset):
res = Dataset(dict((k, dtypes.get_fill_value(v.dtype))
for k, v in xarray_obj.data_vars.items()),
dict((k, dtypes.get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
xarray_obj.attrs)
elif isinstance(xarray_obj, DataArray):
res = DataArray(dtypes.get_fill_value(xarray_obj.dtype),
dict((k, dtypes.get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
dims=[],
name=xarray_obj.name,
attrs=xarray_obj.attrs)
else: # pragma: no cover
raise AssertionError
return res
def _is_one_or_none(obj):
return obj == 1 or obj is None
def _consolidate_slices(slices):
"""Consolidate adjacent slices in a list of slices.
"""
result = []
last_slice = slice(None)
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError('list element is not a slice: %r' % slice_)
if (result and last_slice.stop == slice_.start
and _is_one_or_none(last_slice.step)
and _is_one_or_none(slice_.step)):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
result.append(slice_)
last_slice = slice_
return result
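# Hedged example (illustrative only): adjacent unit-step slices are merged,
# while non-contiguous slices are kept as-is.
def _example_consolidate_slices():
    merged = _consolidate_slices([slice(0, 3), slice(3, 5)])
    # merged -> [slice(0, 5, None)]
    kept = _consolidate_slices([slice(0, 3), slice(4, 6)])
    # kept -> [slice(0, 3), slice(4, 6)]
    return merged, kept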
def _inverse_permutation_indices(positions):
"""Like inverse_permutation, but also handles slices.
Parameters
----------
positions : list of np.ndarray or slice objects.
If slice objects, all are assumed to be slices.
Returns
-------
np.ndarray of indices or None, if no permutation is necessary.
"""
if not positions:
return None
if isinstance(positions[0], slice):
positions = _consolidate_slices(positions)
if positions == slice(None):
return None
positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]
indices = nputils.inverse_permutation(np.concatenate(positions))
return indices
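# Hedged example (illustrative only): given per-group positions, the returned
# index array reorders the group-concatenated data back to its original order.
def _example_inverse_permutation_indices():
    order = _inverse_permutation_indices([np.array([1, 2]), np.array([0])])
    # order -> array([2, 0, 1]); indexing the concatenated data with it puts
    # each element back at its original position
    return order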
class _DummyGroup(object):
"""Class for keeping track of grouped dimensions without coordinates.
Should not be user visible.
"""
def __init__(self, obj, name, coords):
self.name = name
self.coords = coords
self.dims = (name,)
self.ndim = 1
self.size = obj.sizes[name]
self.values = range(self.size)
def _ensure_1d(group, obj):
if group.ndim != 1:
# try to stack the dims of the group into a single dim
orig_dims = group.dims
stacked_dim = 'stacked_' + '_'.join(orig_dims)
# these dimensions get created by the stack operation
inserted_dims = [dim for dim in group.dims if dim not in group.coords]
# the copy is necessary here, otherwise read only array raises error
# in pandas: https://github.com/pydata/pandas/issues/12813
group = group.stack(**{stacked_dim: orig_dims}).copy()
obj = obj.stack(**{stacked_dim: orig_dims})
else:
stacked_dim = None
inserted_dims = []
return group, obj, stacked_dim, inserted_dims
def _unique_and_monotonic(group):
if isinstance(group, _DummyGroup):
return True
else:
index = safe_cast_to_index(group)
return index.is_unique and index.is_monotonic
class GroupBy(object):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
    groupby object is with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
Dataset.groupby
DataArray.groupby
"""
def __init__(self, obj, group, squeeze=False, grouper=None, bins=None,
cut_kwargs={}):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group : DataArray
Array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
grouper : pd.Grouper, optional
Used for grouping values along the `group` array.
bins : array-like, optional
If `bins` is specified, the groups will be discretized into the
specified bins by `pandas.cut`.
cut_kwargs : dict, optional
Extra keyword arguments to pass to `pandas.cut`
"""
from .dataarray import DataArray
if grouper is not None and bins is not None:
raise TypeError("can't specify both `grouper` and `bins`")
if not isinstance(group, (DataArray, IndexVariable)):
if not hashable(group):
raise TypeError('`group` must be an xarray.DataArray or the '
'name of an xarray variable or dimension')
group = obj[group]
if group.name not in obj and group.name in obj.dims:
# DummyGroups should not appear on groupby results
group = _DummyGroup(obj, group.name, group.coords)
if getattr(group, 'name', None) is None:
raise ValueError('`group` must have a name')
group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)
group_dim, = group.dims
expected_size = obj.sizes[group_dim]
if group.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
full_index = None
if bins is not None:
binned = pd.cut(group.values, bins, **cut_kwargs)
new_dim_name = group.name + '_bins'
group = DataArray(binned, group.coords, name=new_dim_name)
full_index = binned.categories
if grouper is not None:
index = safe_cast_to_index(group)
if not index.is_monotonic:
# TODO: sort instead of raising an error
raise ValueError('index must be monotonic for resampling')
s = pd.Series(np.arange(index.size), index)
first_items = s.groupby(grouper).first()
if first_items.isnull().any():
full_index = first_items.index
first_items = first_items.dropna()
sbins = first_items.values.astype(np.int64)
group_indices = ([slice(i, j)
for i, j in zip(sbins[:-1], sbins[1:])] +
[slice(sbins[-1], None)])
unique_coord = IndexVariable(group.name, first_items.index)
elif group.dims == (group.name,) and _unique_and_monotonic(group):
# no need to factorize
group_indices = np.arange(group.size)
if not squeeze:
# use slices to do views instead of fancy indexing
# equivalent to: group_indices = group_indices.reshape(-1, 1)
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group
else:
# look through group to find the unique values
unique_values, group_indices = unique_value_groups(
safe_cast_to_index(group), sort=(bins is None))
unique_coord = IndexVariable(group.name, unique_values)
# specification for the groupby operation
self._obj = obj
self._group = group
self._group_dim = group_dim
self._group_indices = group_indices
self._unique_coord = unique_coord
self._stacked_dim = stacked_dim
self._inserted_dims = inserted_dims
self._full_index = full_index
# cached attributes
self._groups = None
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self._unique_coord.values,
self._group_indices))
return self._groups
def __len__(self):
return self._unique_coord.size
def __iter__(self):
return zip(self._unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self._group_indices:
yield self._obj.isel(**{self._group_dim: indices})
def _infer_concat_args(self, applied_example):
if self._group_dim in applied_example.dims:
coord = self._group
positions = self._group_indices
else:
coord = self._unique_coord
positions = None
dim, = coord.dims
if isinstance(coord, _DummyGroup):
coord = None
return coord, dim, positions
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
g = f if not reflexive else lambda x, y: f(y, x)
applied = self._yield_binary_applied(g, other)
combined = self._combine(applied)
return combined
return func
def _yield_binary_applied(self, func, other):
dummy = None
for group_value, obj in self:
try:
other_sel = other.sel(**{self._group.name: group_value})
except AttributeError:
raise TypeError('GroupBy objects only support binary ops '
'when the other argument is a Dataset or '
'DataArray')
except (KeyError, ValueError):
if self._group.name not in other.dims:
raise ValueError('incompatible dimensions for a grouped '
'binary operation: the group variable %r '
'is not a dimension on the other argument'
% self._group.name)
if dummy is None:
dummy = _dummy_copy(other)
other_sel = dummy
result = func(obj, other_sel)
yield result
def _maybe_restore_empty_groups(self, combined):
"""Our index contained empty groups (e.g., from a resampling). If we
reduced on that dimension, we want to restore the full index.
"""
if (self._full_index is not None and
self._group.name in combined.dims):
indexers = {self._group.name: self._full_index}
combined = combined.reindex(**indexers)
return combined
def _maybe_unstack(self, obj):
"""This gets called if we are applying on an array with a
multidimensional group."""
if self._stacked_dim is not None and self._stacked_dim in obj.dims:
obj = obj.unstack(self._stacked_dim)
for dim in self._inserted_dims:
if dim in obj.coords:
del obj.coords[dim]
return obj
def fillna(self, value):
"""Fill missing values in this object by group.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : valid type for the grouped object's fillna method
Used to fill all matching missing values by group.
Returns
-------
same type as the grouped object
See also
--------
Dataset.fillna
DataArray.fillna
"""
out = ops.fillna(self, value)
return out
def where(self, cond, other=dtypes.NA):
"""Return elements from `self` or `other` depending on `cond`.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
            Locations at which to preserve this object's values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, inserts missing values.
Returns
-------
same type as the grouped object
See also
--------
Dataset.where
"""
return ops.where_method(self, cond, other)
def _first_or_last(self, op, skipna, keep_attrs):
if isinstance(self._group_indices[0], integer_types):
# NB. this is currently only used for reductions along an existing
# dimension
return self._obj
return self.reduce(op, self._group_dim, skipna=skipna,
keep_attrs=keep_attrs, allow_lazy=True)
def first(self, skipna=None, keep_attrs=True):
"""Return the first element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)
def last(self, skipna=None, keep_attrs=True):
"""Return the last element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)
def assign_coords(self, **kwargs):
"""Assign coordinates by group.
See also
--------
Dataset.assign_coords
"""
return self.apply(lambda ds: ds.assign_coords(**kwargs))
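# Hedged usage sketch (not part of the original module; the variable names are
# illustrative): GroupBy objects are normally created through
# DataArray.groupby / Dataset.groupby rather than instantiated directly.
def _example_groupby_usage():
    import xarray as xr
    da = xr.DataArray(np.arange(6), dims='x',
                      coords={'letters': ('x', ['a', 'b', 'a', 'c', 'b', 'a'])})
    # mean of the values falling into each 'letters' group
    return da.groupby('letters').mean()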
def _maybe_reorder(xarray_obj, dim, positions):
order = _inverse_permutation_indices(positions)
if order is None:
return xarray_obj
else:
return xarray_obj[{dim: order}]
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self._obj.variable
for indices in self._group_indices:
yield var[{self._group_dim: indices}]
def _concat_shortcut(self, applied, dim, positions=None):
# nb. don't worry too much about maintaining this method -- it does
# speed things up, but it's not very interpretable and there are much
# faster alternatives (e.g., doing the grouped aggregation in a
# compiled language)
stacked = Variable.concat(applied, dim, shortcut=True)
reordered = _maybe_reorder(stacked, dim, positions)
result = self._obj._replace_maybe_drop_dims(reordered)
return result
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self._group.name:
dimension, = self._group.dims
if dimension in self._obj.dims:
axis = self._obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dims, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the arrays. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or Dataset
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, **kwargs))
for arr in grouped)
return self._combine(applied, shortcut=shortcut)
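# Comment-only sketch of the two stacking rules above, assuming a hypothetical
# DataArray ``da`` with a ``time`` dimension:
#
#     da.groupby('time.month').apply(lambda a: a - a.mean())  # 'time' survives -> rule 1
#     da.groupby('time.month').apply(lambda a: a.mean())      # 'time' reduced -> rule 2, new 'month' dim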
def _combine(self, applied, shortcut=False):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
if shortcut:
combined = self._concat_shortcut(applied, dim, positions)
else:
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
if isinstance(combined, type(self._obj)):
# only restore dimension order for arrays
combined = self._restore_dim_order(combined)
if coord is not None:
if shortcut:
combined._coords[coord.name] = as_variable(coord)
else:
combined.coords[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
shortcut=True, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither is supplied, then
`func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)
class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset or DataArray
The result of splitting, applying and combining this dataset.
"""
kwargs.pop('shortcut', None) # ignore shortcut if set (for now)
applied = (func(ds, **kwargs) for ds in self._iter_grouped())
return self._combine(applied)
def _combine(self, applied):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
if coord is not None:
combined[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. If not supplied, `func` is
calculated over all dimensions for each group item.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Dataset
Dataset with summarized data and the indicated dimension(s)
removed.
"""
def reduce_dataset(ds):
return ds.reduce(func, dim, keep_attrs, **kwargs)
return self.apply(reduce_dataset)
def assign(self, **kwargs):
"""Assign data variables by group.
See also
--------
Dataset.assign
"""
return self.apply(lambda ds: ds.assign(**kwargs))
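# Comment-only sketch (assumed names): add a per-group demeaned copy of a
# hypothetical data variable ``foo``; the callable is evaluated once per
# sub-dataset:
#
#     ds.groupby('letters').assign(demeaned=lambda g: g.foo - g.foo.mean())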
ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import time
import pyscf.pbc.tools as tools
from mpi4py import MPI
from pyscf.lib import logger
from pyscf.pbc.mpitools import mpi_load_balancer
from pyscf import lib
from pyscf.pbc import lib as pbclib
from pyscf.pbc.tools.tril import tril_index, unpack_tril
comm = MPI.COMM_WORLD
#einsum = np.einsum
einsum = lib.einsum
dot = np.dot
# This is restricted (R)CCSD
# Ref: Hirata et al., J. Chem. Phys. 120, 2581 (2004)
### Eqs. (37)-(39) "kappa"
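# For orientation (an illustrative sketch of what cc_tau1 below assembles, not a
# restatement of the cited equations): tau1_ij^ab = t2_ij^ab + t1_i^a * t1_j^b,
# where in the k-point code the t1*t1 product only contributes when momentum
# conservation gives ki == ka and kj == kb.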
def cc_tau1(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
BLKSIZE = (1,1,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
tmp_oovv_shape = BLKSIZE + (nocc,nocc,nvir,nvir)
tmp_oovv = np.empty(tmp_oovv_shape,dtype=t1.dtype)
tmp2_oovv = np.zeros_like(tmp_oovv)
tau1_ooVv = feri2['tau1_ooVv']
tau1_oOvv = feri2['tau1_oOvv']
tau1_oovv_rev = feri2['tau1_oovv_rev']
tau2_Oovv = feri2['tau2_Oovv']
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
for iterki, ki in enumerate(ranges0):
for iterkj, kj in enumerate(ranges1):
for iterka, ka in enumerate(ranges2):
kb = kconserv[ki,ka,kj]
tmp_oovv[iterki,iterkj,iterka] = t2[ki,kj,ka].copy()
tmp2_oovv[iterki,iterkj,iterka] *= 0.0
if ki == ka and kj == kb:
tmp2_oovv[iterki,iterkj,iterka] = einsum('ia,jb->ijab',t1[ki],t1[kj])
tau1_oovv_rev[kj,ka,kb] = (tmp_oovv[iterki,iterkj,iterka] + tmp2_oovv[iterki,iterkj,iterka])
tau1_ooVv[min(ranges0):max(ranges0)+1,min(ranges1):max(ranges1)+1,nvir*min(ranges2):nvir*(max(ranges2)+1)] = \
( tmp_oovv[:len(ranges0),:len(ranges1),:len(ranges2)] +
tmp2_oovv[:len(ranges0),:len(ranges1),:len(ranges2)] ).transpose(0,1,2,5,3,4,6).reshape(len(ranges0),len(ranges1),len(ranges2)*nvir,nocc,nocc,nvir)
tau1_oOvv[min(ranges0):max(ranges0)+1,min(ranges2):max(ranges2)+1,nocc*min(ranges1):nocc*(max(ranges1)+1)] = \
( tmp_oovv[:len(ranges0),:len(ranges1),:len(ranges2)] +
tmp2_oovv[:len(ranges0),:len(ranges1),:len(ranges2)] ).transpose(0,2,1,4,3,5,6).reshape(len(ranges0),len(ranges2),len(ranges1)*nocc,nocc,nvir,nvir)
tau2_Oovv[min(ranges1):max(ranges1)+1,min(ranges2):max(ranges2)+1,nocc*min(ranges0):nocc*(max(ranges0)+1)] = \
( tmp_oovv[:len(ranges0),:len(ranges1),:len(ranges2)] +
2*tmp2_oovv[:len(ranges0),:len(ranges1),:len(ranges2)] ).transpose(1,2,0,3,4,5,6).reshape(len(ranges1),len(ranges2),len(ranges0)*nocc,nocc,nvir,nvir)
loader.slave_finished()
comm.Barrier()
return
def cc_Foo(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
Fki = np.empty((nkpts,nocc,nocc),dtype=t2.dtype)
for ki in range(nkpts):
kk = ki
Fki[ki] = eris.fock[ki,:nocc,:nocc].copy()
for kl in range(nkpts):
for kc in range(nkpts):
#Fki[ki] += einsum('lkcd,licd->ki',eris.SoOvv[kk,kc],tau1_oOvv[ki,kc])
kd = kconserv[kk,kc,kl]
Soovv = 2*eris.oovv[kk,kl,kc] - eris.oovv[kk,kl,kd].transpose(0,1,3,2)
#Fki[ki] += einsum('klcd,ilcd->ki',Soovv,t2[ki,kl,kc])
Fki[ki] += einsum('klcd,ilcd->ki',Soovv,unpack_tril(t2,nkpts,ki,kl,kc,kconserv[ki,kc,kl]))
#if ki == kc:
kd = kconserv[kk,ki,kl]
Soovv = 2*eris.oovv[kk,kl,ki] - eris.oovv[kk,kl,kd].transpose(0,1,3,2)
Fki[ki] += einsum('klcd,ic,ld->ki',Soovv,t1[ki],t1[kl])
return Fki
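# Rough shape of the intermediate assembled above (illustrative sketch, not a
# restatement of the cited equations):
#     Fki = f_ki + sum_{l,c,d} (2*<kl|cd> - <kl|dc>) * (t2_il^cd + t1_i^c * t1_l^d)
# with the exchange term realized by the transpose(0,1,3,2) on eris.oovv.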
def cc_Fvv(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
Fac = np.empty((nkpts,nvir,nvir),dtype=t2.dtype)
for ka in range(nkpts):
kc = ka
Fac[ka] = eris.fock[ka,nocc:,nocc:].copy()
#for kk in range(nkpts):
# Fac[ka] += -einsum('lkcd,lkad->ac',eris.SoOvv[kk,kc],tau1_oOvv[kk,ka])
for kl in range(nkpts):
for kk in range(nkpts):
kd = kconserv[kk,kc,kl]
Soovv = 2*eris.oovv[kk,kl,kc] - eris.oovv[kk,kl,kd].transpose(0,1,3,2)
#Fac[ka] += -einsum('klcd,klad->ac',Soovv,t2[kk,kl,ka])
Fac[ka] += -einsum('klcd,klad->ac',Soovv,unpack_tril(t2,nkpts,kk,kl,ka,kconserv[kk,ka,kl]))
#if kk == ka
kd = kconserv[ka,kc,kl]
Soovv = 2*eris.oovv[ka,kl,kc] - eris.oovv[ka,kl,kd].transpose(0,1,3,2)
Fac[ka] += -einsum('klcd,ka,ld->ac',Soovv,t1[ka],t1[kl])
return Fac
def cc_Fov(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
Fkc = np.empty((nkpts,nocc,nvir),dtype=t2.dtype)
Fkc[:] = eris.fock[:,:nocc,nocc:].copy()
for kk in range(nkpts):
for kl in range(nkpts):
Soovv = 2.*eris.oovv[kk,kl,kk] - eris.oovv[kk,kl,kl].transpose(0,1,3,2)
Fkc[kk] += einsum('klcd,ld->kc',Soovv,t1[kl])
return Fkc
### Eqs. (40)-(41) "lambda"
def Loo(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:]
Lki = cc_Foo(cc,t1,t2,eris,feri2)
for ki in range(nkpts):
Lki[ki] += einsum('kc,ic->ki',fov[ki],t1[ki])
SoOov = (2*eris.ooov[ki,:,ki] - eris.ooov[:,ki,ki].transpose(0,2,1,3,4)).transpose(0,2,1,3,4).reshape(nkpts*nocc,nocc,nocc,nvir)
Lki[ki] += einsum('lkic,lc->ki',SoOov,t1.reshape(nkpts*nocc,nvir))
return Lki
def Lvv(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:]
Lac = cc_Fvv(cc,t1,t2,eris,feri2)
for ka in range(nkpts):
Lac[ka] += -einsum('kc,ka->ac',fov[ka],t1[ka])
for kk in range(nkpts):
Svovv = 2*eris.ovvv[kk,ka,kk].transpose(1,0,3,2) - eris.ovvv[kk,ka,ka].transpose(1,0,2,3)
Lac[ka] += einsum('akcd,kd->ac',Svovv,t1[kk])
return Lac
### Eqs. (42)-(45) "chi"
def cc_Woooo(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
khelper = cc.khelper
#Wklij = np.array(eris.oooo, copy=True)
#for pqr in range(nUnique_klist):
# kk, kl, ki = unique_klist[pqr]
BLKSIZE = (1,1,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
oooo_tmp_shape = BLKSIZE + (nocc,nocc,nocc,nocc)
oooo_tmp = np.empty(shape=oooo_tmp_shape,dtype=t1.dtype)
tau1_ooVv = feri2['tau1_ooVv']
#Woooo = feri2['Woooo']
Woooo_rev = feri2['Woooo_rev']
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
for iterkk, kk in enumerate(ranges0):
for iterkl, kl in enumerate(ranges1):
for iterki, ki in enumerate(ranges2):
kj = kconserv[kk,ki,kl]
oooo_tmp[iterkk,iterkl,iterki] = np.array(eris.oooo[kk,kl,ki],copy=True)
oooo_tmp[iterkk,iterkl,iterki] += einsum('klic,jc->klij',eris.ooov[kk,kl,ki],t1[kj])
oooo_tmp[iterkk,iterkl,iterki] += einsum('klcj,ic->klij',eris.ooov[kl,kk,kj].transpose(1,0,3,2),t1[ki])
# Note how tau1 is stored: instead of looping over the k-point index 'kc' and
# the orbital index 'c' separately, the (kc,k,l,c,d) indices are packed into
# (nkpts*nvir,k,l,d), so a single einsum over the merged leading index replaces
# the loop, saving read operations.
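# Equivalent reshape, shown for a hypothetical array A of shape
# (nkpts,nocc,nocc,nvir,nvir) indexed as A[kc,k,l,c,d]:
#     A.transpose(0,3,1,2,4).reshape(nkpts*nvir,nocc,nocc,nvir)
# merges (kc,c) into a single leading axis of length nkpts*nvir.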
oooo_tmp[iterkk,iterkl,iterki] += einsum('ckld,cijd->klij',eris.ooVv[kk,kl],tau1_ooVv[ki,kj])
#for kc in range(nkpts):
# oooo_tmp[iterkk,iterkl,iterki] += einsum('klcd,ijcd->klij',eris.oovv[kk,kl,kc],t2[ki,kj,kc])
#oooo_tmp[iterkk,iterkl,iterki] += einsum('klcd,ic,jd->klij',eris.oovv[kk,kl,ki],t1[ki],t1[kj])
#Woooo[kk,kl,ki] = oooo_tmp[iterkk,iterkl,iterki]
#Woooo[kl,kk,kj] = oooo_tmp[iterkk,iterkl,iterki].transpose(1,0,3,2)
Woooo_rev[kl,ki,kj] = oooo_tmp[iterkk,iterkl,iterki]
#Woooo[min(ranges0):max(ranges0)+1,min(ranges1):max(ranges1)+1,min(ranges2):max(ranges2)+1] = \
# oooo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
# If you want to take the symmetry of the Woooo integral into account:
#feri2.Woooo[min(ranges1):max(ranges1)+1,min(ranges0):max(ranges0)+1,min(ranges2):max(ranges2)+1] = \
# oooo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)].transpose(0,1,2,4,3,6,5)
loader.slave_finished()
comm.Barrier()
return
def cc_Wvvvv(cc,t1,t2,eris,feri2=None):
## Slow:
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
vvvv_tmp = np.empty((nvir,nvir,nvir,nvir),dtype=t1.dtype)
loader = mpi_load_balancer.load_balancer(BLKSIZE=(1,nkpts,))
loader.set_ranges((range(nkpts),range(nkpts),))
Wvvvv = feri2['Wvvvv']
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1 = loader.get_blocks_from_data(data)
for ka in ranges0:
for kc in ranges1:
for kb in range(ka+1):
kd = kconserv[ka,kc,kb]
vvvv_tmp = np.array(eris.vvvv[ka,kb,kc],copy=True)
vvvv_tmp += einsum('akcd,kb->abcd',eris.ovvv[kb,ka,kd].transpose(1,0,3,2),-t1[kb])
vvvv_tmp += einsum('kbcd,ka->abcd',eris.ovvv[ka,kb,kc],-t1[ka])
Wvvvv[ka,kb,kc] = vvvv_tmp
Wvvvv[kb,ka,kd] = vvvv_tmp.transpose(1,0,3,2)
loader.slave_finished()
## Fast
#nocc,nvir = t1.shape
#Wabcd = np.empty((nvir,)*4)
#for a in range(nvir):
# Wabcd[a,:] = einsum('kcd,kb->bcd',eris.vovv[a],-t1)
##Wabcd += einsum('kbcd,ka->abcd',eris.ovvv,-t1)
#Wabcd += lib.dot(-t1.T,eris.ovvv.reshape(nocc,-1)).reshape((nvir,)*4)
#Wabcd += np.asarray(eris.vvvv)
comm.Barrier()
return
def cc_Wvoov(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
#Wakic = np.empty((nkpts,nkpts,nkpts,nvir,nocc,nocc,nvir),dtype=t1.dtype)
BLKSIZE = (1,1,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
voov_tmp_shape = BLKSIZE + (nvir,nocc,nocc,nvir)
voov_tmp = np.empty(voov_tmp_shape,dtype=t1.dtype)
tau2_Oovv = feri2['tau2_Oovv']
#Wvoov = feri2['Wvoov']
WvOov = feri2['WvOov']
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
ix = sum([[min(x),max(x)+1] for x in (ranges0,ranges1,ranges2)], [])
#eris_ooov = eris.ooov[ix[0]:ix[1], ix[2]:ix[3], ix[4]:ix[5]]
for iterka, ka in enumerate(ranges0):
for iterkk, kk in enumerate(ranges1):
for iterki, ki in enumerate(ranges2):
kc = kconserv[ka,ki,kk]
voov_tmp[iterka,iterkk,iterki] = np.array(eris.ovvo[kk,ka,kc]).transpose(1,0,3,2)
voov_tmp[iterka,iterkk,iterki] -= einsum('lkic,la->akic',eris.ooov[ka,kk,ki],t1[ka])
voov_tmp[iterka,iterkk,iterki] += einsum('akdc,id->akic',eris.ovvv[kk,ka,kc].transpose(1,0,3,2),t1[ki])
# Beginning of change
#for kl in range(nkpts):
# # kl - kd + kk = kc
# # => kd = kl - kc + kk
# kd = kconserv[kl,kc,kk]
# Soovv = 2*np.array(eris.oovv[kl,kk,kd]) - np.array(eris.oovv[kl,kk,kc]).transpose(0,1,3,2)
# voov_tmp[iterka,iterkk,iterki] += 0.5*einsum('lkdc,ilad->akic',Soovv,t2[ki,kl,ka])
# voov_tmp[iterka,iterkk,iterki] -= 0.5*einsum('lkdc,ilda->akic',eris.oovv[kl,kk,kd],t2[ki,kl,kd])
#voov_tmp[iterka,iterkk,iterki] -= einsum('lkdc,id,la->akic',eris.oovv[ka,kk,ki],t1[ki],t1[ka])
#Wvoov[ka,kk,ki] = voov_tmp[iterka,iterkk,iterki]
# Making various intermediates...
#t2_oOvv = t2[ki,:,ka].transpose(0,2,1,3,4).reshape(nkpts*nocc,nocc,nvir,nvir)
t2_oOvv = unpack_tril(t2,nkpts,ki,range(nkpts),ka,kconserv[ki,ka,range(nkpts)]).transpose(0,2,1,3,4).reshape(nkpts*nocc,nocc,nvir,nvir)
#eris_oOvv = eris.oovv[kk,:,kc].transpose(0,2,1,3,4).reshape(nkpts*nocc,nocc,nvir,nvir)
voov_tmp[iterka,iterkk,iterki] += 0.5*einsum('lkcd,liad->akic',eris.SoOvv[kk,kc],t2_oOvv)
voov_tmp[iterka,iterkk,iterki] -= 0.5*einsum('lkcd,liad->akic',eris.oOvv[kk,kc],tau2_Oovv[ki,ka])
# End of change
#Wvoov[min(ranges0):max(ranges0)+1,min(ranges1):max(ranges1)+1,min(ranges2):max(ranges2)+1] = \
# voov_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
WvOov[min(ranges0):max(ranges0)+1,min(ranges2):max(ranges2)+1,nocc*min(ranges1):nocc*(max(ranges1)+1)] = \
voov_tmp[:len(ranges0),:len(ranges1),:len(ranges2)].transpose(0,2,1,4,3,5,6).reshape(len(ranges1),len(ranges2),len(ranges0)*nocc,nvir,nocc,nvir)
loader.slave_finished()
comm.Barrier()
return
def cc_Wvovo(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
BLKSIZE = (1,1,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
vovo_tmp_shape = BLKSIZE + (nvir,nocc,nvir,nocc)
vovo_tmp = np.empty(shape=vovo_tmp_shape,dtype=t1.dtype)
Wvovo = feri2['Wvovo']
WvOVo = feri2['WvOVo']
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
for iterka, ka in enumerate(ranges0):
for iterkk, kk in enumerate(ranges1):
for iterkc, kc in enumerate(ranges2):
ki = kconserv[ka,kc,kk]
vovo_tmp[iterka,iterkk,iterkc] = np.array(eris.ovov[kk,ka,ki]).transpose(1,0,3,2)
vovo_tmp[iterka,iterkk,iterkc] -= einsum('lkci,la->akci',eris.ooov[kk,ka,ki].transpose(1,0,3,2),t1[ka])
vovo_tmp[iterka,iterkk,iterkc] += einsum('akcd,id->akci',eris.ovvv[kk,ka,ki].transpose(1,0,3,2),t1[ki])
# Beginning of change
#for kl in range(nkpts):
# kd = kconserv[kl,kc,kk]
# vovo_tmp[iterka,iterkk,iterkc] -= 0.5*einsum('lkcd,ilda->akci',eris.oovv[kl,kk,kc],t2[ki,kl,kd])
#vovo_tmp[iterka,iterkk,iterkc] -= einsum('lkcd,id,la->akci',eris.oovv[ka,kk,kc],t1[ki],t1[ka])
#Wvovo[ka,kk,kc] = vovo_tmp[iterka,iterkk,iterkc]
oovvf = eris.oovv[:,kk,kc].reshape(nkpts*nocc,nocc,nvir,nvir)
#t2f = t2[:,ki,ka].copy() #This is a tau like term
t2f = unpack_tril(t2,nkpts,range(nkpts),ki,ka,kconserv[range(nkpts),ka,ki]).copy() #This is a tau like term
#for kl in range(nkpts):
# kd = kconserv[kl,kc,kk]
# if ki == kd and kl == ka:
# t2f[kl] += 2*einsum('id,la->liad',t1[ki],t1[ka])
kd = kconserv[ka,kc,kk]
t2f[ka] += 2*einsum('id,la->liad',t1[kd],t1[ka])
t2f = t2f.reshape(nkpts*nocc,nocc,nvir,nvir)
vovo_tmp[iterka,iterkk,iterkc] -= 0.5*einsum('lkcd,liad->akci',oovvf,t2f)
Wvovo[min(ranges0):max(ranges0)+1,min(ranges1):max(ranges1)+1,min(ranges2):max(ranges2)+1] = \
vovo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
WvOVo[min(ranges0):max(ranges0)+1,nocc*min(ranges1):nocc*(max(ranges1)+1),nvir*min(ranges2):nvir*(max(ranges2)+1)] = \
vovo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)].transpose(0,1,4,2,5,3,6).reshape(len(ranges0),len(ranges1)*nocc,len(ranges2)*nvir,nvir,nocc)
# End of change
loader.slave_finished()
comm.Barrier()
return
def cc_Wovov(cc,t1,t2,eris,feri2=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
BLKSIZE = (1,1,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
ovov_tmp_shape = BLKSIZE + (nocc,nvir,nocc,nvir)
ovov_tmp = np.empty(shape=ovov_tmp_shape,dtype=t1.dtype)
#Wovov = feri2['Wovov']
WOvov = feri2['WOvov']
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
for iterkk, kk in enumerate(ranges0):
for iterka, ka in enumerate(ranges1):
for iterki, ki in enumerate(ranges2):
kc = kconserv[kk,ki,ka]
ovov_tmp[iterkk,iterka,iterki] = np.array(eris.ovov[kk,ka,ki],copy=True)
ovov_tmp[iterkk,iterka,iterki] -= einsum('lkci,la->kaic',eris.ooov[kk,ka,ki].transpose(1,0,3,2),t1[ka])
ovov_tmp[iterkk,iterka,iterki] += einsum('akcd,id->kaic',eris.ovvv[kk,ka,ki].transpose(1,0,3,2),t1[ki])
# Beginning of change
#for kl in range(nkpts):
# kd = kconserv[kl,kc,kk]
# ovov_tmp[iterka,iterkk,iterkc] -= 0.5*einsum('lkcd,ilda->akci',eris.oovv[kl,kk,kc],t2[ki,kl,kd])
#ovov_tmp[iterka,iterkk,iterkc] -= einsum('lkcd,id,la->akci',eris.oovv[ka,kk,kc],t1[ki],t1[ka])
#Wvovo[ka,kk,kc] = ovov_tmp[iterka,iterkk,iterkc]
oovvf = eris.oovv[:,kk,kc].reshape(nkpts*nocc,nocc,nvir,nvir)
#t2f = t2[:,ki,ka].copy() #This is a tau like term
t2f = unpack_tril(t2,nkpts,range(nkpts),ki,ka,kconserv[range(nkpts),ka,ki]).copy() #This is a tau like term
#for kl in range(nkpts):
# kd = kconserv[kl,kc,kk]
# if ki == kd and kl == ka:
# t2f[kl] += 2*einsum('id,la->liad',t1[ki],t1[ka])
kd = kconserv[ka,kc,kk]
t2f[ka] += 2*einsum('id,la->liad',t1[kd],t1[ka])
t2f = t2f.reshape(nkpts*nocc,nocc,nvir,nvir)
ovov_tmp[iterkk,iterka,iterki] -= 0.5*einsum('lkcd,liad->kaic',oovvf,t2f)
#Wovov[min(ranges0):max(ranges0)+1,min(ranges1):max(ranges1)+1,min(ranges2):max(ranges2)+1] = \
# ovov_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
WOvov[min(ranges1):max(ranges1)+1,min(ranges2):max(ranges2)+1,nocc*min(ranges0):nocc*(max(ranges0)+1)] = \
ovov_tmp[:len(ranges0),:len(ranges1),:len(ranges2)].transpose(1,2,0,3,4,5,6).reshape(len(ranges1),len(ranges2),len(ranges0)*nocc,nvir,nocc,nvir)
# End of change
loader.slave_finished()
comm.Barrier()
return
# EOM Intermediates w/ k-points
def Wooov(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wklid = np.zeros((nkpts,nkpts,nkpts,nocc,nocc,nocc,nvir),dtype=t2.dtype)
else:
Wklid = fint['Wooov']
# TODO can do much better than this... call recursive function
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
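# Worked numbers for the block-size formula above (assumed, illustrative only):
# with nocc=4, nvir=20, nkpts=8 and complex128 (16 bytes),
# pre = 4*4*20*20*8*16 ~ 8.2e5 bytes, so
# nkpts_blksize = min(max(floor(0.5e9/8.2e5), 1), 8) = 8; the second block
# dimension repeats the rule with pre*nkpts_blksize in the denominator.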
ooov_tmp_size = BLKSIZE + (nocc,nocc,nocc,nvir)
ooov_tmp = np.empty(ooov_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_ooov_kli = _cp(eris.ooov[s0,s1,s2])
eris_oovv_kli = _cp(eris.oovv[s0,s1,s2])
for iterkk,kk in enumerate(ranges0):
for iterkl,kl in enumerate(ranges1):
for iterki,ki in enumerate(ranges2):
kd = kconserv[kk,ki,kl]
ooov_tmp[iterkk,iterkl,iterki] = eris_ooov_kli[iterkk,iterkl,iterki].copy()
ooov_tmp[iterkk,iterkl,iterki] += einsum('ic,klcd->klid',t1[ki],eris_oovv_kli[iterkk,iterkl,iterki])
Wklid[s0,s1,s2] = ooov_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wklid, op=MPI.SUM)
return Wklid
def Wvovv(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Walcd = np.zeros((nkpts,nkpts,nkpts,nvir,nocc,nvir,nvir),dtype=t2.dtype)
else:
Walcd = fint['Wvovv']
# TODO can do much better than this... call recursive function
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nvir*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
vovv_tmp_size = BLKSIZE + (nvir,nocc,nvir,nvir)
vovv_tmp = np.empty(vovv_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_vovv_alc = _cp(eris.vovv[s0,s1,s2])
eris_oovv_alc = _cp(eris.oovv[s0,s1,s2])
for iterka,ka in enumerate(ranges0):
for iterkl,kl in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
kd = kconserv[ka,kc,kl]
# vovv[ka,kl,kc,kd] <= ovvv[kl,ka,kd,kc].transpose(1,0,3,2)
vovv_tmp[iterka,iterkl,iterkc] = eris_vovv_alc[iterka,iterkl,iterkc] #np.array(eris.ovvv[kl,ka,kd]).transpose(1,0,3,2)
vovv_tmp[iterka,iterkl,iterkc] += -einsum('ka,klcd->alcd',t1[ka],eris_oovv_alc[iterka,iterkl,iterkc])
Walcd[s0,s1,s2] = vovv_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Walcd, op=MPI.SUM)
return Walcd
def W1ovvo(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkaci = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc),dtype=t1.dtype)
else:
Wkaci = fint['W1ovvo']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovvo_tmp_size = BLKSIZE + (nocc,nvir,nvir,nocc)
ovvo_tmp = np.empty(ovvo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_ovvo_kac = _cp(eris.ovvo[s0,s1,s2])
eris_oovv_kXc = _cp(eris.oovv[s0,:,s2])
eris_oovv_Xkc = _cp(eris.oovv[:,s0,s2])
for iterkk,kk in enumerate(ranges0):
for iterka,ka in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
ki = kconserv[kk,kc,ka]
ovvo_tmp[iterkk,iterka,iterkc] = _cp(eris_ovvo_kac[iterkk,iterka,iterkc])
#St2 = 2.*t2[ki,:,ka]
St2 = 2.*unpack_tril(t2,nkpts,ki,range(nkpts),ka,kconserv[ki,ka,range(nkpts)])
#St2 -= t2[:,ki,ka].transpose(0,2,1,3,4)
St2 -= unpack_tril(t2,nkpts,range(nkpts),ki,ka,kconserv[range(nkpts),ka,ki]).transpose(0,2,1,3,4)
ovvo_tmp[iterkk,iterka,iterkc] += einsum('klcd,ilad->kaci',eris_oovv_kXc[iterkk,:,iterkc].transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir),
St2.transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
ovvo_tmp[iterkk,iterka,iterkc] += -einsum('lkcd,ilad->kaci',eris_oovv_Xkc[:,iterkk,iterkc].reshape(nocc*nkpts,nocc,nvir,nvir),
unpack_tril(t2,nkpts,ki,range(nkpts),ka,kconserv[ki,ka,range(nkpts)]).transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
# t2[ki,:,ka].transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
Wkaci[s0,s1,s2] = ovvo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkaci, op=MPI.SUM)
return Wkaci
def W2ovvo(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkaci = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc),dtype=t1.dtype)
WWooov = Wooov(cc,t1,t2,eris)
else:
Wkaci = fint['W2ovvo']
WWooov = fint['Wooov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nvir*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovvo_tmp_size = BLKSIZE + (nocc,nvir,nvir,nocc)
ovvo_tmp = np.empty(ovvo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
Wooov_akX = _cp(WWooov[s1,s0])
eris_ovvv_kac = _cp(eris.ovvv[s0,s1,s2])
for iterkk,kk in enumerate(ranges0):
for iterka,ka in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
ki = kconserv[kk,kc,ka]
ovvo_tmp[iterkk,iterka,iterkc] = einsum('la,lkic->kaci',-t1[ka],Wooov_akX[iterka,iterkk,ki])
ovvo_tmp[iterkk,iterka,iterkc] += einsum('akdc,id->kaci',eris_ovvv_kac[iterkk,iterka,iterkc].transpose(1,0,3,2),t1[ki])
Wkaci[s0,s1,s2] = ovvo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkaci, op=MPI.SUM)
return Wkaci
def Wovvo(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkaci = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc),dtype=t2.dtype)
W1kaci = W1ovvo(cc,t1,t2,eris,fint)
W2kaci = W2ovvo(cc,t1,t2,eris,fint)
else:
Wkaci = fint['Wovvo']
W1kaci = fint['W1ovvo']
W2kaci = fint['W2ovvo']
# TODO can do much better than this... call recursive function
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
Wkaci[s0,s1,s2] = _cp(W1kaci[s0,s1,s2]) + _cp(W2kaci[s0,s1,s2])
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkaci, op=MPI.SUM)
return Wkaci
def W1ovov(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkbid = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nocc,nvir),dtype=t2.dtype)
else:
Wkbid = fint['W1ovov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovov_tmp_size = BLKSIZE + (nocc,nvir,nocc,nvir)
ovov_tmp = np.empty(ovov_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_ovov = _cp(eris.ovov[s0,s1,s2])
for iterkk,kk in enumerate(ranges0):
for iterkb,kb in enumerate(ranges1):
for iterki,ki in enumerate(ranges2):
kd = kconserv[kk,ki,kb]
ovov_tmp[iterkk,iterkb,iterki] = eris_ovov[iterkk,iterkb,iterki].copy()
ovov_tmp[iterkk,iterkb,iterki] += -einsum('lkdc,libc->kbid',eris.oovv[:,kk,kd].reshape(nkpts*nocc,nocc,nvir,nvir),
# t2[:,ki,kb].reshape(nkpts*nocc,nocc,nvir,nvir))
unpack_tril(t2,nkpts,range(nkpts),ki,kb,kconserv[range(nkpts),kb,ki]).reshape(nkpts*nocc,nocc,nvir,nvir))
Wkbid[s0,s1,s2] = ovov_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkbid, op=MPI.SUM)
return Wkbid
def W2ovov(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkbid = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nocc,nvir),dtype=t2.dtype)
WWooov = Wooov(cc,t1,t2,eris)
else:
Wkbid = fint['W2ovov']
WWooov = fint['Wooov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nvir*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovov_tmp_size = BLKSIZE + (nocc,nvir,nocc,nvir)
ovov_tmp = np.empty(ovov_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_ovvv = _cp(eris.ovvv[s0,s1,s2])
WWooov_kbi = _cp(WWooov[s0,s1,s2])
for iterkk,kk in enumerate(ranges0):
for iterkb,kb in enumerate(ranges1):
for iterki,ki in enumerate(ranges2):
kd = kconserv[kk,ki,kb]
ovov_tmp[iterkk,iterkb,iterki] = einsum('klid,lb->kbid',WWooov_kbi[iterkk,iterkb,iterki],-t1[kb])
ovov_tmp[iterkk,iterkb,iterki] += einsum('kbcd,ic->kbid',eris_ovvv[iterkk,iterkb,iterki],t1[ki])
Wkbid[s0,s1,s2] = ovov_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkbid, op=MPI.SUM)
return Wkbid
def Wovov(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkbid = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nocc,nvir),dtype=t2.dtype)
WW1ovov = W1ovov(cc,t1,t2,eris)
WW2ovov = W2ovov(cc,t1,t2,eris)
else:
Wkbid = fint['Wovov']
WW1ovov = fint['W1ovov']
WW2ovov = fint['W2ovov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovov_tmp_size = BLKSIZE + (nocc,nvir,nocc,nvir)
ovov_tmp = np.empty(ovov_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
Wkbid[s0,s1,s2] = _cp(WW1ovov[s0,s1,s2]) + _cp(WW2ovov[s0,s1,s2])
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkbid, op=MPI.SUM)
return Wkbid
def WovovRev(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkbid = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nocc,nvir),dtype=t2.dtype)
WW1ovov = W1ovov(cc,t1,t2,eris)
WW2ovov = W2ovov(cc,t1,t2,eris)
else:
Wkbid = fint['WovovRev']
WW1ovov = fint['W1ovov']
WW2ovov = fint['W2ovov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovov_tmp_size = BLKSIZE + (nocc,nvir,nocc,nvir)
ovov_tmp = np.empty(ovov_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
Wkbid[s2,s1,s0] = (_cp(WW1ovov[s0,s1,s2]) + _cp(WW2ovov[s0,s1,s2])).transpose(2,1,0,3,4,5,6)
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkbid, op=MPI.SUM)
return Wkbid
# This is the same Woooo intermediate used in cc
def Woooo(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wklij = np.zeros((nkpts,nkpts,nkpts,nocc,nocc,nocc,nocc),dtype=t2.dtype)
else:
Wklij = fint['Woooo']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
nkpts_blksize2 = 1
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
oooo_tmp_size = BLKSIZE + (nocc,nocc,nocc,nocc)
oooo_tmp = np.empty(oooo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_oovv_klX = _cp(eris.oovv[s0,s1,s2])
eris_oooo_kli = _cp(eris.oooo[s0,s1,s2])
eris_ooov_klX = _cp(eris.ooov[s0,s1,s2])
eris_ooov_lkX = _cp(eris.ooov[s1,s0,s2])
for iterkk,kk in enumerate(ranges0):
for iterkl,kl in enumerate(ranges1):
for iterki,ki in enumerate(ranges2):
kj = kconserv[kk,ki,kl]
#tau1 = t2[ki,kj,:].copy()
tau1 = unpack_tril(t2,nkpts,ki,kj,range(nkpts),kconserv[ki,range(nkpts),kj]).copy()
tau1[ki] += einsum('ic,jd->ijcd',t1[ki],t1[kj])
oooo_tmp[iterkk,iterkl,iterki] = eris_oooo_kli[iterkk,iterkl,iterki].copy()
oooo_tmp[iterkk,iterkl,iterki] += einsum('kld,ijd->klij',eris_oovv_klX[iterkk,iterkl,:].transpose(1,2,0,3,4).reshape(nocc,nocc,-1),
tau1.transpose(1,2,0,3,4).reshape(nocc,nocc,-1))
oooo_tmp[iterkk,iterkl,iterki] += einsum('klid,jd->klij',eris_ooov_klX[iterkk,iterkl,ki],t1[kj])
oooo_tmp[iterkk,iterkl,iterki] += einsum('lkjc,ic->klij',eris_ooov_lkX[iterkl,iterkk,kj],t1[ki])
Wklij[s0,s1,s2] = oooo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wklij, op=MPI.SUM)
return Wklij
# This has different storage compared to Woooo, more amenable to I/O
# Instead of calling Woooo[kk,kl,ki] to get Woooo[kk,kl,ki,kj] you call
# WooooS[kl,ki,kj]
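# Sketch of the index relation (comment only): for fixed (kk,kl,ki) with
# kj = kconserv[kk,ki,kl], the block stored below satisfies
# WooooS[kl,ki,kj] == Woooo[kk,kl,ki], element for element.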
def WooooS(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wklij = np.zeros((nkpts,nkpts,nkpts,nocc,nocc,nocc,nocc),dtype=t2.dtype)
else:
Wklij = fint['WooooS']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
nkpts_blksize2 = 1
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
oooo_tmp_size = BLKSIZE + (nocc,nocc,nocc,nocc)
oooo_tmp = np.empty(oooo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_oovv_klX = _cp(eris.oovv[s0,s1,s2])
eris_oooo_kli = _cp(eris.oooo[s0,s1,s2])
eris_ooov_klX = _cp(eris.ooov[s0,s1,s2])
eris_ooov_lkX = _cp(eris.ooov[s1,s0,s2])
for iterkk,kk in enumerate(ranges0):
for iterkl,kl in enumerate(ranges1):
for iterki,ki in enumerate(ranges2):
kj = kconserv[kk,ki,kl]
#tau1 = t2[ki,kj,:].copy()
tau1 = unpack_tril(t2,nkpts,ki,kj,range(nkpts),kconserv[ki,range(nkpts),kj]).copy()
tau1[ki] += einsum('ic,jd->ijcd',t1[ki],t1[kj])
oooo_tmp[iterkk,iterkl,iterki] = eris_oooo_kli[iterkk,iterkl,iterki].copy()
oooo_tmp[iterkk,iterkl,iterki] += einsum('kld,ijd->klij',eris_oovv_klX[iterkk,iterkl,:].transpose(1,2,0,3,4).reshape(nocc,nocc,-1),
tau1.transpose(1,2,0,3,4).reshape(nocc,nocc,-1))
oooo_tmp[iterkk,iterkl,iterki] += einsum('klid,jd->klij',eris_ooov_klX[iterkk,iterkl,ki],t1[kj])
oooo_tmp[iterkk,iterkl,iterki] += einsum('lkjc,ic->klij',eris_ooov_lkX[iterkl,iterkk,kj],t1[ki])
Wklij[kl,ki,kj] = oooo_tmp[iterkk,iterkl,iterki]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wklij, op=MPI.SUM)
return Wklij
def Wvvvv(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wabcd = np.zeros((nkpts,nkpts,nkpts,nvir,nvir,nvir,nvir),dtype=t2.dtype)
else:
Wabcd = fint['Wvvvv']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nvir*nvir*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
nkpts_blksize2 = 1
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
vvvv_tmp_size = BLKSIZE + (nvir,nvir,nvir,nvir)
vvvv_tmp = np.empty(vvvv_tmp_size,dtype=t2.dtype)
print("vvvv blksize")
print(BLKSIZE)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_vovv = _cp(eris.vovv[s0,s1,s2])
eris_ovvv = _cp(eris.ovvv[s0,s1,s2])
eris_oovv_abc = _cp(eris.oovv[s0,s1,s2])
vvvv_tmp = _cp(eris.vvvv[s0,s1,s2])
for iterka,ka in enumerate(ranges0):
for iterkb,kb in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
kd = kconserv[ka,kc,kb]
vvvv_tmp[iterka,iterkb,iterkc] += einsum('klcd,ka,lb->abcd',eris_oovv_abc[iterka,iterkb,iterkc],t1[ka],t1[kb])
OOvv = np.empty( (nkpts,nocc,nocc,nvir,nvir), dtype=t2.dtype)
t2_tmp = np.empty( (nkpts,nocc,nocc,nvir,nvir), dtype=t2.dtype)
#for kk in range(nkpts):
# # kk + kl - kc - kd = 0
# # => kl = kc - kk + kd
# kl = kconserv[kc,kk,kd]
# vvvv_tmp[iterka,iterkb,iterkc] += einsum('klcd,klab->abcd',eris.oovv[kk,kl,kc],t2[kk,kl,ka])
for kk in range(nkpts):
# kk + kl - kc - kd = 0
kl = kconserv[kc,kk,kd]
OOvv[kk] = eris.oovv[kk,kl,kc]
#t2_tmp[kk] = t2[kk,kl,ka]
t2_tmp[kk] = unpack_tril(t2,nkpts,kk,kl,ka,kconserv[kk,ka,kl])
OOvv = OOvv.reshape(-1,nvir,nvir)
t2_tmp = t2_tmp.reshape(-1,nvir,nvir)
vvvv_tmp[iterka,iterkb,iterkc] += einsum('xcd,xab->abcd',OOvv,t2_tmp)
vvvv_tmp[iterka,iterkb,iterkc] += einsum('alcd,lb->abcd',eris_vovv[iterka,iterkb,iterkc],-t1[kb])
#vvvv_tmp[iterka,iterkb,iterkc] += einsum('bkdc,ka->abcd',eris.vovv[kb,ka,kd],-t1[ka])
vvvv_tmp[iterka,iterkb,iterkc] += einsum('kbcd,ka->abcd',eris_ovvv[iterka,iterkb,iterkc],-t1[ka])
Wabcd[s0,s1,s2] = vvvv_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wabcd, op=MPI.SUM)
return Wabcd
def Wvvvo(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wabcj = np.zeros((nkpts,nkpts,nkpts,nvir,nvir,nvir,nocc),dtype=t2.dtype)
WWvvvv = Wvvvv(cc,t1,t2,eris)
WW1ovov = W1ovov(cc,t1,t2,eris)
WW1voov = W1voov(cc,t1,t2,eris)
FFov = cc_Fov(cc,t1,t2,eris)
else:
Wabcj = fint['Wvvvo']
WWvvvv = fint['Wvvvv']
WW1ovov = fint['W1ovov']
WW1voov = fint['W1voov']
FFov = cc_Fov(cc,t1,t2,eris)
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nvir*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
nkpts_blksize2 = 1
BLKSIZE = (1,nkpts_blksize2,nkpts_blksize,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
vvvo_tmp_size = BLKSIZE + (nvir,nvir,nvir,nocc)
vvvo_tmp = np.empty(vvvo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_vovv_aXc = _cp(eris.vovv[s0,:,s2])
eris_ovvv_Xac = _cp(eris.ovvv[:,s0,s2])
eris_ovvv_Xbc = _cp(eris.ovvv[:,s1,s2])
Wvvvv_abc = _cp(WWvvvv[s0,s1,s2])
W1voov_abc = _cp(WW1voov[s1,s0,:])
W1ovov_baX = _cp(WW1ovov[s1,s0,:])
for iterka,ka in enumerate(ranges0):
for iterkb,kb in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
kj = kconserv[ka,kc,kb]
vvvo_tmp[iterka,iterkb,iterkc] = np.array(eris.vovv[kc,kj,ka]).transpose(2,3,0,1).conj()
vvvo_tmp[iterka,iterkb,iterkc] += einsum('abcd,jd->abcj',Wvvvv_abc[iterka,iterkb,iterkc],t1[kj])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lajc,lb->abcj',W1ovov_baX[iterkb,iterka,kj],-t1[kb])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('bkjc,ka->abcj',W1voov_abc[iterkb,iterka,kj],-t1[ka])
kl_ranges = range(nkpts)
kd_ranges = kconserv[ka,kc,kl_ranges]
St2 = np.empty((nkpts,nocc,nocc,nvir,nvir),dtype=t2.dtype)
for kl in range(nkpts):
# ka + kl - kc - kd = 0
# => kd = ka - kc + kl
kd = kconserv[ka,kc,kl]
#St2[kl] = 2.*t2[kl,kj,kd]
St2[kl] = 2.*unpack_tril(t2,nkpts,kl,kj,kd,kconserv[kl,kd,kj])
#St2[kl] -= t2[kl,kj,kb].transpose(0,1,3,2)
St2[kl] -= unpack_tril(t2,nkpts,kl,kj,kb,kconserv[kl,kb,kj]).transpose(0,1,3,2)
vvvo_tmp[iterka,iterkb,iterkc] += einsum('alcd,ljdb->abcj',
eris_vovv_aXc[iterka,:,iterkc].transpose(1,0,2,3,4).reshape(nvir,nkpts*nocc,nvir,nvir),
St2.reshape(nkpts*nocc,nocc,nvir,nvir))
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lacd,jlbd->abcj',
eris_ovvv_Xac[:,iterka,iterkc].reshape(nkpts*nocc,nvir,nvir,nvir),
# -t2[kj,:,kb].transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
-unpack_tril(t2,nkpts,kj,range(nkpts),kb,
kconserv[kj,kb,range(nkpts)]).transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lbcd,ljad->abcj',
eris_ovvv_Xbc[:,iterkb,iterkc].reshape(nkpts*nocc,nvir,nvir,nvir),
# -t2[:,kj,ka].reshape(nkpts*nocc,nocc,nvir,nvir))
-unpack_tril(t2,nkpts,range(nkpts),kj,ka,kconserv[range(nkpts),ka,kj]).reshape(nkpts*nocc,nocc,nvir,nvir))
for kl in range(nkpts):
kk = kconserv[kb,kl,ka]
#vvvo_tmp[iterka,iterkb,iterkc] += einsum('jclk,lkba->abcj',eris.ovoo[kj,kc,kl].conj(),t2[kl,kk,kb])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('jclk,lkba->abcj',eris.ovoo[kj,kc,kl].conj(),unpack_tril(t2,nkpts,kl,kk,kb,kconserv[kl,kb,kk]))
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lkjc,lb,ka->abcj',eris.ooov[kb,ka,kj],t1[kb],t1[ka])
#vvvo_tmp[iterka,iterkb,iterkc] += einsum('lc,ljab->abcj',-FFov[kc],t2[kc,kj,ka])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lc,ljab->abcj',-FFov[kc],unpack_tril(t2,nkpts,kc,kj,ka,kconserv[kc,ka,kj]))
Wabcj[s0,s1,s2] = vvvo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wabcj, op=MPI.SUM)
return Wabcj
def WvvvoR1(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wabcj = np.zeros((nkpts,nkpts,nkpts,nvir,nvir,nvir,nocc),dtype=t2.dtype)
WWvvvv = Wvvvv(cc,t1,t2,eris)
WW1ovov = W1ovov(cc,t1,t2,eris)
WW1voov = W1voov(cc,t1,t2,eris)
FFov = cc_Fov(cc,t1,t2,eris)
else:
Wabcj = fint['WvvvoR1']
WWvvvv = fint['Wvvvv']
WW1ovov = fint['W1ovov']
WW1voov = fint['W1voov']
FFov = cc_Fov(cc,t1,t2,eris)
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nvir*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (1,nkpts_blksize2,nkpts_blksize,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
vvvo_tmp_size = BLKSIZE + (nvir,nvir,nvir,nocc)
vvvo_tmp = np.empty(vvvo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_vovv_aXc = _cp(eris.vovv[s0,:,s2])
eris_ovvv_Xac = _cp(eris.ovvv[:,s0,s2])
eris_ovvv_Xbc = _cp(eris.ovvv[:,s1,s2])
eris_vovvR1_cXa = _cp(eris.vovvR1[s0,s2,:])
Wvvvv_abc = _cp(WWvvvv[s0,s1,s2])
W1voov_baX = _cp(WW1voov[s1,s0,:])
W1ovov_baX = _cp(WW1ovov[s1,s0,:])
for iterka,ka in enumerate(ranges0):
for iterkb,kb in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
kj = kconserv[ka,kc,kb]
#vvvo_tmp[iterka,iterkb,iterkc] = np.array(eris.vovv[kc,kj,ka]).transpose(2,3,0,1).conj()
vvvo_tmp[iterka,iterkb,iterkc] = np.array(eris_vovvR1_cXa[iterka,iterkc,kj]).transpose(2,3,0,1).conj()
vvvo_tmp[iterka,iterkb,iterkc] += einsum('abcd,jd->abcj',Wvvvv_abc[iterka,iterkb,iterkc],t1[kj])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lajc,lb->abcj',W1ovov_baX[iterkb,iterka,kj],-t1[kb])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('bkjc,ka->abcj',W1voov_baX[iterkb,iterka,kj],-t1[ka])
kl_ranges = range(nkpts)
kd_ranges = kconserv[ka,kc,kl_ranges]
St2 = np.empty((nkpts,nocc,nocc,nvir,nvir),dtype=t2.dtype)
for kl in range(nkpts):
# ka + kl - kc - kd = 0
# => kd = ka - kc + kl
kd = kconserv[ka,kc,kl]
#St2[kl] = 2.*t2[kl,kj,kd]
St2[kl] = 2.*unpack_tril(t2,nkpts,kl,kj,kd,kconserv[kl,kd,kj])
#St2[kl] -= t2[kl,kj,kb].transpose(0,1,3,2)
St2[kl] -= unpack_tril(t2,nkpts,kl,kj,kb,kconserv[kl,kb,kj]).transpose(0,1,3,2)
vvvo_tmp[iterka,iterkb,iterkc] += einsum('alcd,ljdb->abcj',
eris_vovv_aXc[iterka,:,iterkc].transpose(1,0,2,3,4).reshape(nvir,nkpts*nocc,nvir,nvir),
St2.reshape(nkpts*nocc,nocc,nvir,nvir))
#vvvo_tmp[iterka,iterkb,iterkc] += einsum('alcd,ljdb->abcj',
# eris_vovvR1_aXc[iterkc,iterka,:].transpose(1,0,2,3,4).reshape(nvir,nkpts*nocc,nvir,nvir),
# St2.reshape(nkpts*nocc,nocc,nvir,nvir))
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lacd,jlbd->abcj',
eris_ovvv_Xac[:,iterka,iterkc].reshape(nkpts*nocc,nvir,nvir,nvir),
-unpack_tril(t2,nkpts,kj,range(nkpts),kb,
kconserv[kj,kb,range(nkpts)]).transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
#vvvo_tmp[iterka,iterkb,iterkc] += einsum('lacd,jlbd->abcj',
# eris_ovvvRev_Xac[iterkc,iterka,:].reshape(nkpts*nocc,nvir,nvir,nvir),
# -t2[kj,:,kb].transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lbcd,ljad->abcj',
eris_ovvv_Xbc[:,iterkb,iterkc].reshape(nkpts*nocc,nvir,nvir,nvir),
-unpack_tril(t2,nkpts,range(nkpts),kj,ka,
kconserv[range(nkpts),ka,kj]).reshape(nkpts*nocc,nocc,nvir,nvir))
#vvvo_tmp[iterka,iterkb,iterkc] += einsum('lbcd,ljad->abcj',
# eris_ovvvRev_Xbc[iterkc,iterkb,:].reshape(nkpts*nocc,nvir,nvir,nvir),
# -t2[:,kj,ka].reshape(nkpts*nocc,nocc,nvir,nvir))
#for kl in range(nkpts):
# kk = kconserv[kb,kl,ka]
# vvvo_tmp[iterka,iterkb,iterkc] += einsum('jclk,lkba->abcj',eris.ovoo[kj,kc,kl].conj(),t2[kl,kk,kb])
eris_ovoo_jcX = _cp(eris.ovoo[kj,kc,:])
t2_tmp = np.empty( (nkpts,nocc,nocc,nvir,nvir), dtype=t2.dtype)
for kl in range(nkpts):
kk = kconserv[kb,kl,ka]
t2_tmp[kl] = unpack_tril(t2,nkpts,kl,kk,kb,kconserv[kl,kb,kk])
#vvvo_tmp[iterka,iterkb,iterkc] += einsum('xjclk,xlkba->abcj',eris.ovoo[kj,kc,kl].conj(),t2[kl,kk,kb])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('jcx,xba->abcj',eris_ovoo_jcX.transpose(1,2,0,3,4).reshape(nocc,nvir,-1).conj(),
t2_tmp.reshape(-1,nvir,nvir))
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lkjc,lb,ka->abcj',eris.ooov[kb,ka,kj],t1[kb],t1[ka])
#vvvo_tmp[iterka,iterkb,iterkc] += einsum('lc,ljab->abcj',-FFov[kc],t2[kc,kj,ka])
vvvo_tmp[iterka,iterkb,iterkc] += einsum('lc,ljab->abcj',-FFov[kc],unpack_tril(t2,nkpts,kc,kj,ka,kconserv[kc,ka,kj]))
Wabcj[s2,s0,s1] = vvvo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)].transpose(2,0,1,3,4,5,6)
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wabcj, op=MPI.SUM)
return Wabcj
def Wovoo(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
FFov = cc_Fov(cc,t1,t2,eris)
if fint is None:
Wkbij = np.zeros((nkpts,nkpts,nkpts,nocc,nvir,nocc,nocc),dtype=t2.dtype)
WW1ovov = W1ovov(cc,t1,t2,eris)
WWoooo = Woooo(cc,t1,t2,eris)
#WW1ovvo = W1ovvo(cc,t1,t2,eris)
WW1voov = W1voov(cc,t1,t2,eris)
else:
Wkbij = fint['Wovoo']
WW1ovov = fint['W1ovov']
WWoooo = fint['Woooo']
#WW1ovvo = fint['W1ovvo']
WW1voov = fint['W1voov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nvir*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
nkpts_blksize2 = 1
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
print(BLKSIZE)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovoo_tmp_size = BLKSIZE + (nocc,nvir,nocc,nocc)
ovoo_tmp = np.empty(ovoo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
WW1ovov_kbi = _cp(WW1ovov[s0,s1,s2])
WWoooo_kbi = _cp(WWoooo[s0,s1,s2])
#WW1ovvo_kbi = _cp(WW1ovvo[s0,s1,s2])
WW1voov_bkX = _cp(WW1voov[s1,s0,:])
eris_vovv_bkX = _cp(eris.vovv[s1,s0,:])
eris_ooov_XkX = _cp(eris.ooov[:,s0,:])
eris_ooov_kXi = _cp(eris.ooov[s0,:,s2])
for iterkk,kk in enumerate(ranges0):
for iterkb,kb in enumerate(ranges1):
for iterki,ki in enumerate(ranges2):
kj = kconserv[kk,ki,kb]
ovoo_tmp[iterkk,iterkb,iterki] = np.array(eris.ovoo[kk,kb,ki],copy=True)
ovoo_tmp[iterkk,iterkb,iterki] += einsum('kbid,jd->kbij',WW1ovov_kbi[iterkk,iterkb,iterki], t1[kj])
ovoo_tmp[iterkk,iterkb,iterki] += einsum('klij,lb->kbij', WWoooo_kbi[iterkk,iterkb,iterki],-t1[kb])
#ovoo_tmp[iterkk,iterkb,iterki] += einsum('kbcj,ic->kbij',WW1ovvo_kbi[iterkk,iterkb,iterki],t1[ki])
ovoo_tmp[iterkk,iterkb,iterki] += einsum('bkjc,ic->kbij',WW1voov_bkX[iterkb,iterkk,kj],t1[ki])
ovoo_tmp[iterkk,iterkb,iterki] += einsum('lkid,jlbd->kbij', -eris_ooov_XkX[:,iterkk,ki].reshape(nkpts*nocc,nocc,nocc,nvir),
unpack_tril(t2,nkpts,kj,range(nkpts),kb,
kconserv[kj,kb,range(nkpts)]).transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
ovoo_tmp[iterkk,iterkb,iterki] += einsum('lkjd,libd->kbij', -eris_ooov_XkX[:,iterkk,kj].reshape(nkpts*nocc,nocc,nocc,nvir),
unpack_tril(t2,nkpts,range(nkpts),ki,kb,
kconserv[range(nkpts),kb,ki]).reshape(nkpts*nocc,nocc,nvir,nvir))
#St2 = 2.*t2[kj,:,kb]
St2 = 2.*unpack_tril(t2,nkpts,kj,range(nkpts),kb,kconserv[kj,kb,range(nkpts)])
#St2 -= t2[:,kj,kb].transpose(0,2,1,3,4)
St2 -= unpack_tril(t2,nkpts,range(nkpts),kj,kb,kconserv[range(nkpts),kb,kj]).transpose(0,2,1,3,4)
St2 = St2.transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir)
ovoo_tmp[iterkk,iterkb,iterki] += einsum('klid,jlbd->kbij', eris_ooov_kXi[iterkk,:,iterki].transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nocc,nvir), St2)
#tau1 = t2[kj,ki,:].copy()
tau1 = unpack_tril(t2,nkpts,kj,ki,range(nkpts),kconserv[kj,range(nkpts),ki]).copy()
tau1[kj] += einsum('jd,ic->jidc',t1[kj],t1[ki])
ovoo_tmp[iterkk,iterkb,iterki] += einsum('bkdc,jidc->kbij', eris_vovv_bkX[iterkb,iterkk,:].transpose(1,2,0,3,4).reshape(nvir,nocc,nvir*nkpts,nvir),
tau1.transpose(1,2,0,3,4).reshape(nocc,nocc,nkpts*nvir,nvir))
#ovoo_tmp[iterkk,iterkb,iterki] += einsum('kc,ijcb->kbij', FFov[kk],t2[ki,kj,kk])
ovoo_tmp[iterkk,iterkb,iterki] += einsum('kc,ijcb->kbij', FFov[kk],unpack_tril(t2,nkpts,ki,kj,kk,kconserv[ki,kk,kj]))
Wkbij[s0,s1,s2] = ovoo_tmp[:len(ranges0),:len(ranges1),:len(ranges2)]
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkbij, op=MPI.SUM)
return Wkbij
def W1voov(cc,t1,t2,eris,fint=None):
nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkaci = np.zeros((nkpts,nkpts,nkpts,nvir,nocc,nocc,nvir),dtype=t1.dtype)
else:
Wkaci = fint['W1voov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovvo_tmp_size = BLKSIZE + (nocc,nvir,nvir,nocc)
ovvo_tmp = np.empty(ovvo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
eris_ovvo_kac = _cp(eris.ovvo[s0,s1,s2])
eris_oovv_kXc = _cp(eris.oovv[s0,:,s2])
eris_oovv_Xkc = _cp(eris.oovv[:,s0,s2])
for iterkk,kk in enumerate(ranges0):
for iterka,ka in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
ki = kconserv[kk,kc,ka]
ovvo_tmp[iterkk,iterka,iterkc] = _cp(eris_ovvo_kac[iterkk,iterka,iterkc])
#St2 = 2.*t2[ki,:,ka]
St2 = 2.*unpack_tril(t2,nkpts,ki,range(nkpts),ka,kconserv[ki,ka,range(nkpts)])
#St2 -= t2[:,ki,ka].transpose(0,2,1,3,4)
St2 -= unpack_tril(t2,nkpts,range(nkpts),ki,ka,kconserv[range(nkpts),ka,ki]).transpose(0,2,1,3,4)
ovvo_tmp[iterkk,iterka,iterkc] += einsum('klcd,ilad->kaci',eris_oovv_kXc[iterkk,:,iterkc].transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir),
St2.transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
ovvo_tmp[iterkk,iterka,iterkc] += -einsum('lkcd,ilad->kaci',eris_oovv_Xkc[:,iterkk,iterkc].reshape(nocc*nkpts,nocc,nvir,nvir),
unpack_tril(t2,nkpts,ki,range(nkpts),ka,
kconserv[ki,ka,range(nkpts)]).transpose(1,0,2,3,4).reshape(nocc,nkpts*nocc,nvir,nvir))
Wkaci[ka,kk,ki] = ovvo_tmp[iterkk,iterka,iterkc].transpose(1,0,3,2)
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkaci, op=MPI.SUM)
return Wkaci
def W2voov(cc,t1,t2,eris,fint=None):
    """Build the W2 piece of the Wvoov CCSD intermediate (the terms involving
    Wooov and the ovvv integrals), distributed over k-point blocks via MPI."""
    nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkaci = np.zeros((nkpts,nkpts,nkpts,nvir,nocc,nocc,nvir),dtype=t1.dtype)
WWooov = Wooov(cc,t1,t2,eris)
else:
Wkaci = fint['W2voov']
WWooov = fint['Wooov']
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nvir*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
ovvo_tmp_size = BLKSIZE + (nocc,nvir,nvir,nocc)
ovvo_tmp = np.empty(ovvo_tmp_size,dtype=t2.dtype)
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
Wooov_akX = _cp(WWooov[s1,s0])
eris_ovvv_kac = _cp(eris.ovvv[s0,s1,s2])
for iterkk,kk in enumerate(ranges0):
for iterka,ka in enumerate(ranges1):
for iterkc,kc in enumerate(ranges2):
ki = kconserv[kk,kc,ka]
ovvo_tmp[iterkk,iterka,iterkc] = einsum('la,lkic->kaci',-t1[ka],Wooov_akX[iterka,iterkk,ki])
ovvo_tmp[iterkk,iterka,iterkc] += einsum('akdc,id->kaci',eris_ovvv_kac[iterkk,iterka,iterkc].transpose(1,0,3,2),t1[ki])
Wkaci[ka,kk,ki] = ovvo_tmp[iterkk,iterka,iterkc].transpose(1,0,3,2)
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkaci, op=MPI.SUM)
return Wkaci
def Wvoov(cc,t1,t2,eris,fint=None):
    """Assemble the full Wvoov intermediate as the blockwise sum of the
    W1voov and W2voov pieces."""
    nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkaci = np.zeros((nkpts,nkpts,nkpts,nvir,nocc,nocc,nvir),dtype=t1.dtype)
W1kaci = W1voov(cc,t1,t2,eris,fint)
W2kaci = W2voov(cc,t1,t2,eris,fint)
else:
Wkaci = fint['Wvoov']
W1kaci = fint['W1voov']
W2kaci = fint['W2voov']
# TODO can do much better than this... call recursive function
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
Wkaci[s0,s1,s2] = _cp(W1kaci[s0,s1,s2]) + _cp(W2kaci[s0,s1,s2])
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkaci, op=MPI.SUM)
return Wkaci
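# A minimal serial sketch of what the block loop above computes, assuming the
# W1voov/W2voov intermediates fit in memory on a single rank (hypothetical
# helper, not called by the driver code above):
def _Wvoov_serial_sketch(cc, t1, t2, eris):
    # Wvoov is simply the elementwise sum of its two pieces; the MPI version
    # above only differs in how the k-point blocks are distributed and summed.
    return W1voov(cc, t1, t2, eris) + W2voov(cc, t1, t2, eris)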
def WvoovR1(cc,t1,t2,eris,fint=None):
    """Same contents as Wvoov, but stored with the three k-point axes
    cyclically permuted (the last k-point axis moved to the front)."""
    nkpts, nocc, nvir = t1.shape
kconserv = cc.kconserv
if fint is None:
Wkaci = np.zeros((nkpts,nkpts,nkpts,nvir,nocc,nocc,nvir),dtype=t1.dtype)
W1kaci = W1voov(cc,t1,t2,eris,fint)
W2kaci = W2voov(cc,t1,t2,eris,fint)
else:
Wkaci = fint['WvoovR1']
W1kaci = fint['W1voov']
W2kaci = fint['W2voov']
# TODO can do much better than this... call recursive function
# Adaptive blocking begins here
mem = 0.5e9
pre = 1.*nocc*nocc*nvir*nvir*nkpts*16
nkpts_blksize = min(max(int(np.floor(mem/pre)),1),nkpts)
nkpts_blksize2 = min(max(int(np.floor(mem/(pre*nkpts_blksize))),1),nkpts)
BLKSIZE = (nkpts_blksize2,nkpts_blksize,nkpts,)
loader = mpi_load_balancer.load_balancer(BLKSIZE=BLKSIZE)
loader.set_ranges((range(nkpts),range(nkpts),range(nkpts),))
# Adaptive blocking ends here
good2go = True
while(good2go):
good2go, data = loader.slave_set()
if good2go is False:
break
ranges0, ranges1, ranges2 = loader.get_blocks_from_data(data)
s0,s1,s2 = [slice(min(x),max(x)+1) for x in (ranges0,ranges1,ranges2)]
Wkaci[s2,s0,s1] = (_cp(W1kaci[s0,s1,s2]) + _cp(W2kaci[s0,s1,s2])).transpose(2,0,1,3,4,5,6)
loader.slave_finished()
comm.Barrier()
if fint is None:
comm.Allreduce(MPI.IN_PLACE, Wkaci, op=MPI.SUM)
return Wkaci
def _cp(a):
    """Return `a` as a C-contiguous ndarray, copying only when necessary."""
    return np.array(a, copy=False, order='C')
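# The "adaptive blocking" lines repeated in every builder above all follow the
# same pattern. A hypothetical refactoring is sketched below (names are
# assumptions; the functions above inline this logic and do not call it). It
# covers the nocc**2 * nvir**2 variant used by most builders; W2voov sizes its
# blocks from an nocc * nvir**3 estimate instead.
def _kpt_block_sizes(nocc, nvir, nkpts, mem=0.5e9, bytes_per_element=16):
    """Choose (outer, inner, nkpts) block sizes so one work block of an
    nocc**2 * nvir**2 * nkpts array stays near `mem` bytes."""
    per_block = 1. * nocc * nocc * nvir * nvir * nkpts * bytes_per_element
    blk1 = min(max(int(np.floor(mem / per_block)), 1), nkpts)
    blk2 = min(max(int(np.floor(mem / (per_block * blk1))), 1), nkpts)
    return (blk2, blk1, nkpts)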
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Tests for swift.common.swob"
import datetime
import unittest
import re
import time
from six import BytesIO
from six.moves.urllib.parse import quote
import swift.common.swob
from swift.common import utils, exceptions
class TestHeaderEnvironProxy(unittest.TestCase):
def test_proxy(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assertEqual(
proxy.environ, {'CONTENT_LENGTH': '20',
'CONTENT_TYPE': 'text/plain',
'HTTP_SOMETHING_ELSE': 'somevalue'})
self.assertEqual(proxy['content-length'], '20')
self.assertEqual(proxy['content-type'], 'text/plain')
self.assertEqual(proxy['something-else'], 'somevalue')
self.assertEqual(set(['Something-Else',
'Content-Length', 'Content-Type']),
set(proxy.keys()))
self.assertEqual(list(iter(proxy)), proxy.keys())
self.assertEqual(3, len(proxy))
def test_ignored_keys(self):
# Constructor doesn't normalize keys
key = 'wsgi.input'
environ = {key: ''}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
self.assertEqual([], list(iter(proxy)))
self.assertEqual([], proxy.keys())
self.assertEqual(0, len(proxy))
self.assertRaises(KeyError, proxy.__getitem__, key)
self.assertNotIn(key, proxy)
proxy['Content-Type'] = 'text/plain'
self.assertEqual(['Content-Type'], list(iter(proxy)))
self.assertEqual(['Content-Type'], proxy.keys())
self.assertEqual(1, len(proxy))
self.assertEqual('text/plain', proxy['Content-Type'])
self.assertIn('Content-Type', proxy)
def test_del(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
del proxy['Content-Length']
del proxy['Content-Type']
del proxy['Something-Else']
self.assertEqual(proxy.environ, {})
self.assertEqual(0, len(proxy))
def test_contains(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assertTrue('content-length' in proxy)
self.assertTrue('content-type' in proxy)
self.assertTrue('something-else' in proxy)
def test_keys(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assertEqual(
set(proxy.keys()),
set(('Content-Length', 'Content-Type', 'Something-Else')))
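# The assertions in TestHeaderEnvironProxy above pin down the header-name to
# environ-key mapping: Content-Length and Content-Type keep their CGI names,
# everything else is prefixed with HTTP_. A small illustrative helper (not
# part of swob itself) that reproduces the keys expected by test_proxy:
def _expected_environ_key(header_name):
    name = header_name.upper().replace('-', '_')
    if name in ('CONTENT_LENGTH', 'CONTENT_TYPE'):
        return name
    return 'HTTP_' + name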
class TestRange(unittest.TestCase):
def test_range(self):
swob_range = swift.common.swob.Range('bytes=1-7')
self.assertEqual(swob_range.ranges[0], (1, 7))
def test_upsidedown_range(self):
swob_range = swift.common.swob.Range('bytes=5-10')
self.assertEqual(swob_range.ranges_for_length(2), [])
def test_str(self):
for range_str in ('bytes=1-7', 'bytes=1-', 'bytes=-1',
'bytes=1-7,9-12', 'bytes=-7,9-'):
swob_range = swift.common.swob.Range(range_str)
self.assertEqual(str(swob_range), range_str)
def test_ranges_for_length(self):
swob_range = swift.common.swob.Range('bytes=1-7')
self.assertEqual(swob_range.ranges_for_length(10), [(1, 8)])
self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)])
self.assertEqual(swob_range.ranges_for_length(None), None)
def test_ranges_for_large_length(self):
swob_range = swift.common.swob.Range('bytes=-100000000000000000000000')
self.assertEqual(swob_range.ranges_for_length(100), [(0, 100)])
def test_ranges_for_length_no_end(self):
swob_range = swift.common.swob.Range('bytes=1-')
self.assertEqual(swob_range.ranges_for_length(10), [(1, 10)])
self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)])
self.assertEqual(swob_range.ranges_for_length(None), None)
# This used to freak out:
swob_range = swift.common.swob.Range('bytes=100-')
self.assertEqual(swob_range.ranges_for_length(5), [])
self.assertEqual(swob_range.ranges_for_length(None), None)
swob_range = swift.common.swob.Range('bytes=4-6,100-')
self.assertEqual(swob_range.ranges_for_length(5), [(4, 5)])
def test_ranges_for_length_no_start(self):
swob_range = swift.common.swob.Range('bytes=-7')
self.assertEqual(swob_range.ranges_for_length(10), [(3, 10)])
self.assertEqual(swob_range.ranges_for_length(5), [(0, 5)])
self.assertEqual(swob_range.ranges_for_length(None), None)
swob_range = swift.common.swob.Range('bytes=4-6,-100')
self.assertEqual(swob_range.ranges_for_length(5), [(4, 5), (0, 5)])
def test_ranges_for_length_multi(self):
swob_range = swift.common.swob.Range('bytes=-20,4-')
self.assertEqual(len(swob_range.ranges_for_length(200)), 2)
        # the actual length is greater than each range element
self.assertEqual(swob_range.ranges_for_length(200),
[(180, 200), (4, 200)])
swob_range = swift.common.swob.Range('bytes=30-150,-10')
self.assertEqual(len(swob_range.ranges_for_length(200)), 2)
# the actual length lands in the middle of a range
self.assertEqual(swob_range.ranges_for_length(90),
[(30, 90), (80, 90)])
        # the actual length is greater than any of the ranges
self.assertEqual(swob_range.ranges_for_length(200),
[(30, 151), (190, 200)])
self.assertEqual(swob_range.ranges_for_length(None), None)
def test_ranges_for_length_edges(self):
swob_range = swift.common.swob.Range('bytes=0-1, -7')
self.assertEqual(swob_range.ranges_for_length(10),
[(0, 2), (3, 10)])
swob_range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEqual(swob_range.ranges_for_length(10),
[(3, 10), (0, 2)])
swob_range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEqual(swob_range.ranges_for_length(5),
[(0, 5), (0, 2)])
def test_ranges_for_length_overlapping(self):
# Fewer than 3 overlaps is okay
swob_range = swift.common.swob.Range('bytes=10-19,15-24')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (15, 25)])
swob_range = swift.common.swob.Range('bytes=10-19,15-24,20-29')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (15, 25), (20, 30)])
# Adjacent ranges, though suboptimal, don't overlap
swob_range = swift.common.swob.Range('bytes=10-19,20-29,30-39')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (20, 30), (30, 40)])
# Ranges that share a byte do overlap
swob_range = swift.common.swob.Range('bytes=10-20,20-30,30-40,40-50')
self.assertEqual(swob_range.ranges_for_length(100), [])
# With suffix byte range specs (e.g. bytes=-2), make sure that we
# correctly determine overlapping-ness based on the entity length
swob_range = swift.common.swob.Range('bytes=10-15,15-20,30-39,-9')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 16), (15, 21), (30, 40), (91, 100)])
self.assertEqual(swob_range.ranges_for_length(20), [])
def test_ranges_for_length_nonascending(self):
few_ranges = ("bytes=100-109,200-209,300-309,500-509,"
"400-409,600-609,700-709")
many_ranges = few_ranges + ",800-809"
swob_range = swift.common.swob.Range(few_ranges)
self.assertEqual(swob_range.ranges_for_length(100000),
[(100, 110), (200, 210), (300, 310), (500, 510),
(400, 410), (600, 610), (700, 710)])
swob_range = swift.common.swob.Range(many_ranges)
self.assertEqual(swob_range.ranges_for_length(100000), [])
def test_ranges_for_length_too_many(self):
at_the_limit_ranges = (
"bytes=" + ",".join("%d-%d" % (x * 1000, x * 1000 + 10)
for x in range(50)))
too_many_ranges = at_the_limit_ranges + ",10000000-10000009"
rng = swift.common.swob.Range(at_the_limit_ranges)
self.assertEqual(len(rng.ranges_for_length(1000000000)), 50)
rng = swift.common.swob.Range(too_many_ranges)
self.assertEqual(rng.ranges_for_length(1000000000), [])
def test_range_invalid_syntax(self):
def _assert_invalid_range(range_value):
try:
swift.common.swob.Range(range_value)
self.fail("Expected %r to be invalid, but wasn't" %
(range_value,))
except ValueError:
pass
"""
All the following cases should result ValueError exception
1. value not starts with bytes=
2. range value start is greater than the end, eg. bytes=5-3
3. range does not have start or end, eg. bytes=-
4. range does not have hyphen, eg. bytes=45
5. range value is non numeric
6. any combination of the above
"""
_assert_invalid_range(None)
_assert_invalid_range('nonbytes=0-')
_assert_invalid_range('nonbytes=foobar,10-2')
_assert_invalid_range('bytes=5-3')
_assert_invalid_range('bytes=-')
_assert_invalid_range('bytes=45')
_assert_invalid_range('bytes=foo-bar,3-5')
_assert_invalid_range('bytes=4-10,45')
_assert_invalid_range('bytes=foobar,3-5')
_assert_invalid_range('bytes=nonumber-5')
_assert_invalid_range('bytes=nonumber')
_assert_invalid_range('bytes=--1')
_assert_invalid_range('bytes=--0')
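# A worked example of the Range semantics exercised above: ranges_for_length
# returns half-open (start, stop) pairs clamped to the entity length, e.g.
# 'bytes=1-7' against a 10-byte entity yields [(1, 8)], and a suffix spec
# 'bytes=-7' against the same entity yields [(3, 10)]. Illustrative only:
def _example_ranges_for_length():
    rng = swift.common.swob.Range('bytes=1-7')
    assert rng.ranges_for_length(10) == [(1, 8)]
    rng = swift.common.swob.Range('bytes=-7')
    assert rng.ranges_for_length(10) == [(3, 10)]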
class TestMatch(unittest.TestCase):
def test_match(self):
match = swift.common.swob.Match('"a", "b"')
self.assertEqual(match.tags, set(('a', 'b')))
self.assertTrue('a' in match)
self.assertTrue('b' in match)
self.assertTrue('c' not in match)
def test_match_star(self):
match = swift.common.swob.Match('"a", "*"')
self.assertTrue('a' in match)
self.assertTrue('b' in match)
self.assertTrue('c' in match)
def test_match_noquote(self):
match = swift.common.swob.Match('a, b')
self.assertEqual(match.tags, set(('a', 'b')))
self.assertTrue('a' in match)
self.assertTrue('b' in match)
self.assertTrue('c' not in match)
class TestTransferEncoding(unittest.TestCase):
def test_is_chunked(self):
headers = {}
self.assertFalse(swift.common.swob.is_chunked(headers))
headers['Transfer-Encoding'] = 'chunked'
self.assertTrue(swift.common.swob.is_chunked(headers))
headers['Transfer-Encoding'] = 'gzip,chunked'
try:
swift.common.swob.is_chunked(headers)
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip'")
headers['Transfer-Encoding'] = 'gzip'
try:
swift.common.swob.is_chunked(headers)
except ValueError as e:
self.assertEqual(str(e), "Invalid Transfer-Encoding header value")
else:
self.fail("Expected a ValueError raised for 'gzip'")
headers['Transfer-Encoding'] = 'gzip,identity'
try:
swift.common.swob.is_chunked(headers)
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip,identity'")
class TestAccept(unittest.TestCase):
def test_accept_json(self):
for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/json;q=1.0', 'application/*',
'text/*,application/json', 'application/*,text/*',
'application/json,text/xml'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/json',
'application/xml', 'text/xml'])
self.assertEqual(match, 'application/json')
def test_accept_plain(self):
for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=0.8', '*/*',
'text/plain,application/xml'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/json',
'application/xml', 'text/xml'])
self.assertEqual(match, 'text/plain')
def test_accept_xml(self):
for accept in ('application/xml', 'application/xml;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=1.0',
'application/xml;charset=UTF-8',
'application/xml;charset=UTF-8;qws="quoted with space"',
'application/xml; q=0.99 ; qws="quoted with space"'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/xml',
'text/xml'])
self.assertEqual(match, 'application/xml')
def test_accept_invalid(self):
for accept in ('*', 'text/plain,,', 'some stuff',
'application/xml;q=1.0;q=1.1', 'text/plain,*',
'text /plain', 'text\x7f/plain',
'text/plain;a=b=c',
'text/plain;q=1;q=2',
'text/plain; ubq="unbalanced " quotes"'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/xml',
'text/xml'])
self.assertEqual(match, None)
def test_repr(self):
acc = swift.common.swob.Accept("application/json")
self.assertEqual(repr(acc), "application/json")
class TestRequest(unittest.TestCase):
def test_blank(self):
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'}, body='hi')
self.assertEqual(req.path_info, '/')
self.assertEqual(req.body, 'hi')
self.assertEqual(req.headers['Content-Type'], 'text/plain')
self.assertEqual(req.method, 'POST')
def test_blank_req_environ_property_args(self):
blank = swift.common.swob.Request.blank
req = blank('/', method='PATCH')
self.assertEqual(req.method, 'PATCH')
self.assertEqual(req.environ['REQUEST_METHOD'], 'PATCH')
req = blank('/', referer='http://example.com')
self.assertEqual(req.referer, 'http://example.com')
self.assertEqual(req.referrer, 'http://example.com')
self.assertEqual(req.environ['HTTP_REFERER'], 'http://example.com')
self.assertEqual(req.headers['Referer'], 'http://example.com')
req = blank('/', script_name='/application')
self.assertEqual(req.script_name, '/application')
self.assertEqual(req.environ['SCRIPT_NAME'], '/application')
req = blank('/', host='www.example.com')
self.assertEqual(req.host, 'www.example.com')
self.assertEqual(req.environ['HTTP_HOST'], 'www.example.com')
self.assertEqual(req.headers['Host'], 'www.example.com')
req = blank('/', remote_addr='127.0.0.1')
self.assertEqual(req.remote_addr, '127.0.0.1')
self.assertEqual(req.environ['REMOTE_ADDR'], '127.0.0.1')
req = blank('/', remote_user='username')
self.assertEqual(req.remote_user, 'username')
self.assertEqual(req.environ['REMOTE_USER'], 'username')
req = blank('/', user_agent='curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.user_agent, 'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.environ['HTTP_USER_AGENT'],
'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.headers['User-Agent'],
'curl/7.22.0 (x86_64-pc-linux-gnu)')
req = blank('/', query_string='a=b&c=d')
self.assertEqual(req.query_string, 'a=b&c=d')
self.assertEqual(req.environ['QUERY_STRING'], 'a=b&c=d')
req = blank('/', if_match='*')
self.assertEqual(req.environ['HTTP_IF_MATCH'], '*')
self.assertEqual(req.headers['If-Match'], '*')
# multiple environ property kwargs
req = blank('/', method='PATCH', referer='http://example.com',
script_name='/application', host='www.example.com',
remote_addr='127.0.0.1', remote_user='username',
user_agent='curl/7.22.0 (x86_64-pc-linux-gnu)',
query_string='a=b&c=d', if_match='*')
self.assertEqual(req.method, 'PATCH')
self.assertEqual(req.referer, 'http://example.com')
self.assertEqual(req.script_name, '/application')
self.assertEqual(req.host, 'www.example.com')
self.assertEqual(req.remote_addr, '127.0.0.1')
self.assertEqual(req.remote_user, 'username')
self.assertEqual(req.user_agent, 'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.query_string, 'a=b&c=d')
self.assertEqual(req.environ['QUERY_STRING'], 'a=b&c=d')
def test_invalid_req_environ_property_args(self):
# getter only property
try:
swift.common.swob.Request.blank(
'/', host_url='http://example.com:8080/v1/a/c/o')
except TypeError as e:
self.assertEqual("got unexpected keyword argument 'host_url'",
str(e))
else:
self.assertTrue(False, "invalid req_environ_property "
"didn't raise error!")
# regular attribute
try:
swift.common.swob.Request.blank('/', _params_cache={'a': 'b'})
except TypeError as e:
self.assertEqual("got unexpected keyword "
"argument '_params_cache'", str(e))
else:
self.assertTrue(False, "invalid req_environ_property "
"didn't raise error!")
# non-existent attribute
try:
swift.common.swob.Request.blank('/', params_cache={'a': 'b'})
except TypeError as e:
self.assertEqual("got unexpected keyword "
"argument 'params_cache'", str(e))
else:
self.assertTrue(False, "invalid req_environ_property "
"didn't raise error!")
# method
try:
swift.common.swob.Request.blank(
'/', as_referer='GET http://example.com')
except TypeError as e:
self.assertEqual("got unexpected keyword "
"argument 'as_referer'", str(e))
else:
self.assertTrue(False, "invalid req_environ_property "
"didn't raise error!")
def test_blank_path_info_precedence(self):
blank = swift.common.swob.Request.blank
req = blank('/a')
self.assertEqual(req.path_info, '/a')
req = blank('/a', environ={'PATH_INFO': '/a/c'})
self.assertEqual(req.path_info, '/a/c')
req = blank('/a', environ={'PATH_INFO': '/a/c'}, path_info='/a/c/o')
self.assertEqual(req.path_info, '/a/c/o')
req = blank('/a', path_info='/a/c/o')
self.assertEqual(req.path_info, '/a/c/o')
def test_blank_body_precedence(self):
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': BytesIO(b'')},
headers={'Content-Type': 'text/plain'}, body='hi')
self.assertEqual(req.path_info, '/')
self.assertEqual(req.body, 'hi')
self.assertEqual(req.headers['Content-Type'], 'text/plain')
self.assertEqual(req.method, 'POST')
body_file = BytesIO(b'asdf')
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': BytesIO(b'')},
headers={'Content-Type': 'text/plain'}, body='hi',
body_file=body_file)
self.assertTrue(req.body_file is body_file)
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': BytesIO(b'')},
headers={'Content-Type': 'text/plain'}, body='hi',
content_length=3)
self.assertEqual(req.content_length, 3)
self.assertEqual(len(req.body), 2)
def test_blank_parsing(self):
req = swift.common.swob.Request.blank('http://test.com/')
self.assertEqual(req.environ['wsgi.url_scheme'], 'http')
self.assertEqual(req.environ['SERVER_PORT'], '80')
self.assertEqual(req.environ['SERVER_NAME'], 'test.com')
req = swift.common.swob.Request.blank('https://test.com:456/')
self.assertEqual(req.environ['wsgi.url_scheme'], 'https')
self.assertEqual(req.environ['SERVER_PORT'], '456')
req = swift.common.swob.Request.blank('test.com/')
self.assertEqual(req.environ['wsgi.url_scheme'], 'http')
self.assertEqual(req.environ['SERVER_PORT'], '80')
self.assertEqual(req.environ['PATH_INFO'], 'test.com/')
self.assertRaises(TypeError, swift.common.swob.Request.blank,
'ftp://test.com/')
def test_params(self):
req = swift.common.swob.Request.blank('/?a=b&c=d')
self.assertEqual(req.params['a'], 'b')
self.assertEqual(req.params['c'], 'd')
new_params = {'e': 'f', 'g': 'h'}
req.params = new_params
self.assertDictEqual(new_params, req.params)
new_params = (('i', 'j'), ('k', 'l'))
req.params = new_params
self.assertDictEqual(dict(new_params), req.params)
def test_timestamp_missing(self):
req = swift.common.swob.Request.blank('/')
self.assertRaises(exceptions.InvalidTimestamp,
getattr, req, 'timestamp')
def test_timestamp_invalid(self):
req = swift.common.swob.Request.blank(
'/', headers={'X-Timestamp': 'asdf'})
self.assertRaises(exceptions.InvalidTimestamp,
getattr, req, 'timestamp')
def test_timestamp(self):
req = swift.common.swob.Request.blank(
'/', headers={'X-Timestamp': '1402447134.13507_00000001'})
expected = utils.Timestamp('1402447134.13507', offset=1)
self.assertEqual(req.timestamp, expected)
self.assertEqual(req.timestamp.normal, expected.normal)
self.assertEqual(req.timestamp.internal, expected.internal)
def test_path(self):
req = swift.common.swob.Request.blank('/hi?a=b&c=d')
self.assertEqual(req.path, '/hi')
req = swift.common.swob.Request.blank(
'/', environ={'SCRIPT_NAME': '/hi', 'PATH_INFO': '/there'})
self.assertEqual(req.path, '/hi/there')
def test_path_question_mark(self):
req = swift.common.swob.Request.blank('/test%3Ffile')
# This tests that .blank unquotes the path when setting PATH_INFO
self.assertEqual(req.environ['PATH_INFO'], '/test?file')
# This tests that .path requotes it
self.assertEqual(req.path, '/test%3Ffile')
def test_path_info_pop(self):
req = swift.common.swob.Request.blank('/hi/there')
self.assertEqual(req.path_info_pop(), 'hi')
self.assertEqual(req.path_info, '/there')
self.assertEqual(req.script_name, '/hi')
def test_bad_path_info_pop(self):
req = swift.common.swob.Request.blank('blahblah')
self.assertEqual(req.path_info_pop(), None)
def test_path_info_pop_last(self):
req = swift.common.swob.Request.blank('/last')
self.assertEqual(req.path_info_pop(), 'last')
self.assertEqual(req.path_info, '')
self.assertEqual(req.script_name, '/last')
def test_path_info_pop_none(self):
req = swift.common.swob.Request.blank('/')
self.assertEqual(req.path_info_pop(), '')
self.assertEqual(req.path_info, '')
self.assertEqual(req.script_name, '/')
def test_copy_get(self):
req = swift.common.swob.Request.blank(
'/hi/there', environ={'REQUEST_METHOD': 'POST'})
self.assertEqual(req.method, 'POST')
req2 = req.copy_get()
self.assertEqual(req2.method, 'GET')
def test_get_response(self):
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, 'hi')
def test_401_unauthorized(self):
# No request environment
resp = swift.common.swob.HTTPUnauthorized()
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
# Request environment
req = swift.common.swob.Request.blank('/')
resp = swift.common.swob.HTTPUnauthorized(request=req)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_401_valid_account_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['hi']
# Request environment contains valid account in path
req = swift.common.swob.Request.blank('/v1/account-name')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="account-name"',
resp.headers['Www-Authenticate'])
# Request environment contains valid account/container in path
req = swift.common.swob.Request.blank('/v1/account-name/c')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="account-name"',
resp.headers['Www-Authenticate'])
def test_401_invalid_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['hi']
# Request environment contains bad path
req = swift.common.swob.Request.blank('/random')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
def test_401_non_keystone_auth_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['no creds in request']
# Request to get token
req = swift.common.swob.Request.blank('/v1.0/auth')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
# Other form of path
req = swift.common.swob.Request.blank('/auth/v1.0')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
def test_401_www_authenticate_exists(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', {
'Www-Authenticate': 'Me realm="whatever"'})
return ['no creds in request']
# Auth middleware sets own Www-Authenticate
req = swift.common.swob.Request.blank('/auth/v1.0')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Me realm="whatever"',
resp.headers['Www-Authenticate'])
def test_401_www_authenticate_is_quoted(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['hi']
hacker = 'account-name\n\n<b>foo<br>' # url injection test
quoted_hacker = quote(hacker)
req = swift.common.swob.Request.blank('/v1/' + hacker)
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="%s"' % quoted_hacker,
resp.headers['Www-Authenticate'])
req = swift.common.swob.Request.blank('/v1/' + quoted_hacker)
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="%s"' % quoted_hacker,
resp.headers['Www-Authenticate'])
def test_not_401(self):
# Other status codes should not have WWW-Authenticate in response
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
resp = req.get_response(test_app)
self.assertTrue('Www-Authenticate' not in resp.headers)
def test_properties(self):
req = swift.common.swob.Request.blank('/hi/there', body='hi')
self.assertEqual(req.body, 'hi')
self.assertEqual(req.content_length, 2)
req.remote_addr = 'something'
self.assertEqual(req.environ['REMOTE_ADDR'], 'something')
req.body = 'whatever'
self.assertEqual(req.content_length, 8)
self.assertEqual(req.body, 'whatever')
self.assertEqual(req.method, 'GET')
req.range = 'bytes=1-7'
self.assertEqual(req.range.ranges[0], (1, 7))
self.assertTrue('Range' in req.headers)
req.range = None
self.assertTrue('Range' not in req.headers)
def test_datetime_properties(self):
req = swift.common.swob.Request.blank('/hi/there', body='hi')
req.if_unmodified_since = 0
self.assertTrue(isinstance(req.if_unmodified_since, datetime.datetime))
if_unmodified_since = req.if_unmodified_since
req.if_unmodified_since = if_unmodified_since
self.assertEqual(if_unmodified_since, req.if_unmodified_since)
req.if_unmodified_since = 'something'
self.assertEqual(req.headers['If-Unmodified-Since'], 'something')
self.assertEqual(req.if_unmodified_since, None)
self.assertTrue('If-Unmodified-Since' in req.headers)
req.if_unmodified_since = None
self.assertTrue('If-Unmodified-Since' not in req.headers)
too_big_date_list = list(datetime.datetime.max.timetuple())
too_big_date_list[0] += 1 # bump up the year
too_big_date = time.strftime(
"%a, %d %b %Y %H:%M:%S UTC", time.struct_time(too_big_date_list))
req.if_unmodified_since = too_big_date
self.assertEqual(req.if_unmodified_since, None)
def test_bad_range(self):
req = swift.common.swob.Request.blank('/hi/there', body='hi')
req.range = 'bad range'
self.assertEqual(req.range, None)
def test_accept_header(self):
req = swift.common.swob.Request({'REQUEST_METHOD': 'GET',
'PATH_INFO': '/',
'HTTP_ACCEPT': 'application/json'})
self.assertEqual(
req.accept.best_match(['application/json', 'text/plain']),
'application/json')
self.assertEqual(
req.accept.best_match(['text/plain', 'application/json']),
'application/json')
def test_swift_entity_path(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.assertEqual(req.swift_entity_path, '/a/c/o')
req = swift.common.swob.Request.blank('/v1/a/c')
self.assertEqual(req.swift_entity_path, '/a/c')
req = swift.common.swob.Request.blank('/v1/a')
self.assertEqual(req.swift_entity_path, '/a')
req = swift.common.swob.Request.blank('/v1')
self.assertEqual(req.swift_entity_path, None)
def test_path_qs(self):
req = swift.common.swob.Request.blank('/hi/there?hello=equal&acl')
self.assertEqual(req.path_qs, '/hi/there?hello=equal&acl')
req = swift.common.swob.Request({'PATH_INFO': '/hi/there',
'QUERY_STRING': 'hello=equal&acl'})
self.assertEqual(req.path_qs, '/hi/there?hello=equal&acl')
def test_url(self):
req = swift.common.swob.Request.blank('/hi/there?hello=equal&acl')
self.assertEqual(req.url,
'http://localhost/hi/there?hello=equal&acl')
def test_wsgify(self):
used_req = []
@swift.common.swob.wsgify
def _wsgi_func(req):
used_req.append(req)
return swift.common.swob.Response('200 OK')
req = swift.common.swob.Request.blank('/hi/there')
resp = req.get_response(_wsgi_func)
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 200)
def test_wsgify_raise(self):
used_req = []
@swift.common.swob.wsgify
def _wsgi_func(req):
used_req.append(req)
raise swift.common.swob.HTTPServerError()
req = swift.common.swob.Request.blank('/hi/there')
resp = req.get_response(_wsgi_func)
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 500)
def test_split_path(self):
"""
Copied from swift.common.utils.split_path
"""
def _test_split_path(path, minsegs=1, maxsegs=None, rwl=False):
req = swift.common.swob.Request.blank(path)
return req.split_path(minsegs, maxsegs, rwl)
self.assertRaises(ValueError, _test_split_path, '')
self.assertRaises(ValueError, _test_split_path, '/')
self.assertRaises(ValueError, _test_split_path, '//')
self.assertEqual(_test_split_path('/a'), ['a'])
self.assertRaises(ValueError, _test_split_path, '//a')
self.assertEqual(_test_split_path('/a/'), ['a'])
self.assertRaises(ValueError, _test_split_path, '/a/c')
self.assertRaises(ValueError, _test_split_path, '//c')
self.assertRaises(ValueError, _test_split_path, '/a/c/')
self.assertRaises(ValueError, _test_split_path, '/a//')
self.assertRaises(ValueError, _test_split_path, '/a', 2)
self.assertRaises(ValueError, _test_split_path, '/a', 2, 3)
self.assertRaises(ValueError, _test_split_path, '/a', 2, 3, True)
self.assertEqual(_test_split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(_test_split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, _test_split_path, '/a/c/o/r', 3, 3)
self.assertEqual(_test_split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(_test_split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, _test_split_path, '/a', 5, 4)
self.assertEqual(_test_split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(_test_split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
_test_split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
_test_split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_unicode_path(self):
req = swift.common.swob.Request.blank(u'/\u2661')
self.assertEqual(req.path, quote(u'/\u2661'.encode('utf-8')))
def test_unicode_query(self):
req = swift.common.swob.Request.blank(u'/')
req.query_string = u'x=\u2661'
self.assertEqual(req.params['x'], u'\u2661'.encode('utf-8'))
def test_url2(self):
pi = '/hi/there'
path = pi
req = swift.common.swob.Request.blank(path)
sche = 'http'
exp_url = '%s://localhost%s' % (sche, pi)
self.assertEqual(req.url, exp_url)
qs = 'hello=equal&acl'
path = '%s?%s' % (pi, qs)
s, p = 'unit.test.example.com', '90'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'SERVER_NAME': s,
'SERVER_PORT': p})
exp_url = '%s://%s:%s%s?%s' % (sche, s, p, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host + ':80'})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com'
sche = 'https'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host + ':443',
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com:81'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host,
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
def test_as_referer(self):
pi = '/hi/there'
qs = 'hello=equal&acl'
sche = 'https'
host = 'unit.test.example.com:81'
req = swift.common.swob.Request({'REQUEST_METHOD': 'POST',
'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host,
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.as_referer(), 'POST ' + exp_url)
def test_message_length_just_content_length(self):
req = swift.common.swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'})
self.assertEqual(req.message_length(), None)
req = swift.common.swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'},
body='x' * 42)
self.assertEqual(req.message_length(), 42)
req.headers['Content-Length'] = 'abc'
try:
req.message_length()
except ValueError as e:
self.assertEqual(str(e), "Invalid Content-Length header value")
else:
self.fail("Expected a ValueError raised for 'abc'")
def test_message_length_transfer_encoding(self):
req = swift.common.swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'},
headers={'transfer-encoding': 'chunked'},
body='x' * 42)
self.assertEqual(req.message_length(), None)
req.headers['Transfer-Encoding'] = 'gzip,chunked'
try:
req.message_length()
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip'")
req.headers['Transfer-Encoding'] = 'gzip'
try:
req.message_length()
except ValueError as e:
self.assertEqual(str(e), "Invalid Transfer-Encoding header value")
else:
self.fail("Expected a ValueError raised for 'gzip'")
req.headers['Transfer-Encoding'] = 'gzip,identity'
try:
req.message_length()
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip,identity'")
class TestStatusMap(unittest.TestCase):
def test_status_map(self):
response_args = []
def start_response(status, headers):
response_args.append(status)
response_args.append(headers)
resp_cls = swift.common.swob.status_map[404]
resp = resp_cls()
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.title, 'Not Found')
body = ''.join(resp({}, start_response))
self.assertTrue('The resource could not be found.' in body)
self.assertEqual(response_args[0], '404 Not Found')
headers = dict(response_args[1])
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
self.assertTrue(int(headers['Content-Length']) > 0)
class TestResponse(unittest.TestCase):
def _get_response(self):
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
return req.get_response(test_app)
def test_properties(self):
resp = self._get_response()
resp.location = 'something'
self.assertEqual(resp.location, 'something')
self.assertTrue('Location' in resp.headers)
resp.location = None
self.assertTrue('Location' not in resp.headers)
resp.content_type = 'text/plain'
self.assertTrue('Content-Type' in resp.headers)
resp.content_type = None
self.assertTrue('Content-Type' not in resp.headers)
def test_empty_body(self):
resp = self._get_response()
resp.body = ''
self.assertEqual(resp.body, '')
def test_unicode_body(self):
resp = self._get_response()
resp.body = u'\N{SNOWMAN}'
self.assertEqual(resp.body, u'\N{SNOWMAN}'.encode('utf-8'))
def test_call_reifies_request_if_necessary(self):
"""
The actual bug was a HEAD response coming out with a body because the
Request object wasn't passed into the Response object's constructor.
The Response object's __call__ method should be able to reify a
Request object from the env it gets passed.
"""
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
req.method = 'HEAD'
status, headers, app_iter = req.call_application(test_app)
resp = swift.common.swob.Response(status=status, headers=dict(headers),
app_iter=app_iter)
output_iter = resp(req.environ, lambda *_: None)
self.assertEqual(list(output_iter), [''])
def test_call_preserves_closeability(self):
def test_app(environ, start_response):
start_response('200 OK', [])
yield "igloo"
yield "shindig"
yield "macadamia"
yield "hullabaloo"
req = swift.common.swob.Request.blank('/')
req.method = 'GET'
status, headers, app_iter = req.call_application(test_app)
iterator = iter(app_iter)
self.assertEqual('igloo', next(iterator))
self.assertEqual('shindig', next(iterator))
app_iter.close()
self.assertRaises(StopIteration, iterator.next)
def test_location_rewrite(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://somehost/something')
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:80'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://somehost/something')
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:443',
'wsgi.url_scheme': 'http'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://somehost:443/something')
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:443',
'wsgi.url_scheme': 'https'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'https://somehost/something')
def test_location_rewrite_no_host(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 80})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://local/something')
req = swift.common.swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 81})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://local:81/something')
def test_location_no_rewrite(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost'})
resp = self._get_response()
resp.location = 'http://www.google.com/'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://www.google.com/')
def test_location_no_rewrite_when_told_not_to(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 81,
'swift.leave_relative_location': True})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, '/something')
def test_app_iter(self):
def start_response(env, headers):
pass
resp = self._get_response()
resp.app_iter = ['a', 'b', 'c']
body = ''.join(resp({}, start_response))
self.assertEqual(body, 'abc')
def test_multi_ranges_wo_iter_ranges(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return ['1234567890']
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=0-9,10-19,20-29'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 10
# read response
''.join(resp._response_iter(resp.app_iter, ''))
self.assertEqual(resp.status, '200 OK')
self.assertEqual(10, resp.content_length)
def test_single_range_wo_iter_range(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return ['1234567890']
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=0-9'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 10
# read response
''.join(resp._response_iter(resp.app_iter, ''))
self.assertEqual(resp.status, '200 OK')
self.assertEqual(10, resp.content_length)
def test_multi_range_body(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '4')])
return ['abcd']
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=0-9,10-19,20-29'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 100
resp.content_type = 'text/plain; charset=utf8'
content = ''.join(resp._response_iter(None,
('0123456789112345678'
'92123456789')))
self.assertTrue(re.match(('--[a-f0-9]{32}\r\n'
'Content-Type: text/plain; charset=utf8\r\n'
'Content-Range: bytes '
'0-9/100\r\n\r\n0123456789\r\n'
'--[a-f0-9]{32}\r\n'
'Content-Type: text/plain; charset=utf8\r\n'
'Content-Range: bytes '
'10-19/100\r\n\r\n1123456789\r\n'
'--[a-f0-9]{32}\r\n'
'Content-Type: text/plain; charset=utf8\r\n'
'Content-Range: bytes '
'20-29/100\r\n\r\n2123456789\r\n'
'--[a-f0-9]{32}--'), content))
def test_multi_response_iter(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10'),
('Content-Type', 'application/xml')])
return ['0123456789']
app_iter_ranges_args = []
class App_iter(object):
def app_iter_ranges(self, ranges, content_type, boundary, size):
app_iter_ranges_args.append((ranges, content_type, boundary,
size))
for i in range(3):
yield str(i) + 'fun'
yield boundary
def __iter__(self):
for i in range(3):
yield str(i) + 'fun'
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=1-5,8-11'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 12
content = ''.join(resp._response_iter(App_iter(), ''))
boundary = content[-32:]
self.assertEqual(content[:-32], '0fun1fun2fun')
self.assertEqual(app_iter_ranges_args,
[([(1, 6), (8, 12)], 'application/xml',
boundary, 12)])
def test_range_body(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return ['1234567890']
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=1-3'})
resp = swift.common.swob.Response(
body='1234567890', request=req,
conditional_response=True)
body = ''.join(resp([], start_response))
self.assertEqual(body, '234')
self.assertEqual(resp.content_range, 'bytes 1-3/10')
self.assertEqual(resp.status, '206 Partial Content')
        # syntactically valid, but does not make sense, so a 416 is returned
        # in the next couple of cases.
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=-0'})
resp = req.get_response(test_app)
resp.conditional_response = True
body = ''.join(resp([], start_response))
self.assertIn('The Range requested is not available', body)
self.assertEqual(resp.content_length, len(body))
self.assertEqual(resp.status, '416 Requested Range Not Satisfiable')
self.assertEqual(resp.content_range, 'bytes */10')
resp = swift.common.swob.Response(
body='1234567890', request=req,
conditional_response=True)
body = ''.join(resp([], start_response))
self.assertIn('The Range requested is not available', body)
self.assertEqual(resp.content_length, len(body))
self.assertEqual(resp.status, '416 Requested Range Not Satisfiable')
# Syntactically-invalid Range headers "MUST" be ignored
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=3-2'})
resp = req.get_response(test_app)
resp.conditional_response = True
body = ''.join(resp([], start_response))
self.assertEqual(body, '1234567890')
self.assertEqual(resp.status, '200 OK')
self.assertNotIn('Content-Range', resp.headers)
resp = swift.common.swob.Response(
body='1234567890', request=req,
conditional_response=True)
body = ''.join(resp([], start_response))
self.assertEqual(body, '1234567890')
self.assertEqual(resp.status, '200 OK')
def test_content_type(self):
resp = self._get_response()
resp.content_type = 'text/plain; charset=utf8'
self.assertEqual(resp.content_type, 'text/plain')
def test_charset(self):
resp = self._get_response()
resp.content_type = 'text/plain; charset=utf8'
self.assertEqual(resp.charset, 'utf8')
resp.charset = 'utf16'
self.assertEqual(resp.charset, 'utf16')
def test_charset_content_type(self):
resp = swift.common.swob.Response(
content_type='text/plain', charset='utf-8')
self.assertEqual(resp.charset, 'utf-8')
resp = swift.common.swob.Response(
charset='utf-8', content_type='text/plain')
self.assertEqual(resp.charset, 'utf-8')
def test_etag(self):
resp = self._get_response()
resp.etag = 'hi'
self.assertEqual(resp.headers['Etag'], '"hi"')
self.assertEqual(resp.etag, 'hi')
self.assertTrue('etag' in resp.headers)
resp.etag = None
self.assertTrue('etag' not in resp.headers)
def test_host_url_default(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'http://bob:1234')
def test_host_url_default_port_squelched(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '80'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'http://bob')
def test_host_url_https(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'https://bob:1234')
def test_host_url_https_port_squelched(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '443'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'https://bob')
def test_host_url_host_override(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother'
self.assertEqual(resp.host_url, 'http://someother')
def test_host_url_host_port_override(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother:5678'
self.assertEqual(resp.host_url, 'http://someother:5678')
def test_host_url_host_https(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother:5678'
self.assertEqual(resp.host_url, 'https://someother:5678')
def test_507(self):
resp = swift.common.swob.HTTPInsufficientStorage()
content = ''.join(resp._response_iter(resp.app_iter, resp._body))
self.assertEqual(
content,
'<html><h1>Insufficient Storage</h1><p>There was not enough space '
'to save the resource. Drive: unknown</p></html>')
resp = swift.common.swob.HTTPInsufficientStorage(drive='sda1')
content = ''.join(resp._response_iter(resp.app_iter, resp._body))
self.assertEqual(
content,
'<html><h1>Insufficient Storage</h1><p>There was not enough space '
'to save the resource. Drive: sda1</p></html>')
def test_200_with_body_and_headers(self):
headers = {'Content-Length': '0'}
content = 'foo'
resp = swift.common.swob.HTTPOk(body=content, headers=headers)
self.assertEqual(resp.body, content)
self.assertEqual(resp.content_length, len(content))
def test_init_with_body_headers_app_iter(self):
# body exists but no headers and no app_iter
body = 'ok'
resp = swift.common.swob.Response(body=body)
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body and headers with 0 content_length exist but no app_iter
body = 'ok'
resp = swift.common.swob.Response(
body=body, headers={'Content-Length': '0'})
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body and headers with content_length exist but no app_iter
body = 'ok'
resp = swift.common.swob.Response(
body=body, headers={'Content-Length': '5'})
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body and headers with no content_length exist but no app_iter
body = 'ok'
resp = swift.common.swob.Response(body=body, headers={})
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body, headers with content_length and app_iter exist
resp = swift.common.swob.Response(
body='ok', headers={'Content-Length': '5'}, app_iter=iter([]))
self.assertEqual(resp.content_length, 5)
self.assertEqual(resp.body, '')
# headers with content_length and app_iter exist but no body
resp = swift.common.swob.Response(
headers={'Content-Length': '5'}, app_iter=iter([]))
self.assertEqual(resp.content_length, 5)
self.assertEqual(resp.body, '')
# app_iter exists but no body and headers
resp = swift.common.swob.Response(app_iter=iter([]))
self.assertEqual(resp.content_length, None)
self.assertEqual(resp.body, '')
class TestUTC(unittest.TestCase):
def test_tzname(self):
self.assertEqual(swift.common.swob.UTC.tzname(None), 'UTC')
class TestConditionalIfNoneMatch(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response('200 OK', [('Etag', 'the-etag')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_simple_match(self):
# etag matches --> 304
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, '')
def test_quoted_simple_match(self):
# double quotes don't matter
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '"the-etag"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, '')
def test_list_match(self):
# it works with lists of etags to match
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '"bert", "the-etag", "ernie"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, '')
def test_list_no_match(self):
# no matches --> whatever the original status was
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '"bert", "ernie"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_match_star(self):
# "*" means match anything; see RFC 2616 section 14.24
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '*'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, '')
class TestConditionalIfMatch(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response('200 OK', [('Etag', 'the-etag')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_simple_match(self):
# if etag matches, proceed as normal
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_simple_conditional_etag_match(self):
# if the response's conditional etag matches, proceed as normal
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'not-the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
resp._conditional_etag = 'not-the-etag'
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_quoted_simple_match(self):
# double quotes or not, doesn't matter
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': '"the-etag"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_no_match(self):
# no match --> 412
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'not-the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, '')
def test_simple_conditional_etag_no_match(self):
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
resp._conditional_etag = 'not-the-etag'
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, '')
def test_match_star(self):
# "*" means match anything; see RFC 2616 section 14.24
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': '*'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_match_star_on_404(self):
def fake_app_404(environ, start_response):
start_response('404 Not Found', [])
return ['hi']
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': '*'})
resp = req.get_response(fake_app_404)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, '')
class TestConditionalIfModifiedSince(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response(
'200 OK', [('Last-Modified', 'Thu, 27 Feb 2014 03:29:37 GMT')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_absent(self):
req = swift.common.swob.Request.blank('/')
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_before(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:36 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_same(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:37 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, '')
def test_greater(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:38 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, '')
def test_out_of_range_is_ignored(self):
# All that datetime gives us is a ValueError or OverflowError when
# something is out of range (i.e. less than datetime.datetime.min or
# greater than datetime.datetime.max). Unfortunately, we can't
# distinguish between a date being too old and a date being too new,
# so the best we can do is ignore such headers.
max_date_list = list(datetime.datetime.max.timetuple())
max_date_list[0] += 1 # bump up the year
too_big_date_header = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.struct_time(max_date_list))
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': too_big_date_header})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
class TestConditionalIfUnmodifiedSince(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response(
'200 OK', [('Last-Modified', 'Thu, 20 Feb 2014 03:29:37 GMT')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_absent(self):
req = swift.common.swob.Request.blank('/')
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_before(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:36 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, '')
def test_same(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:37 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_greater(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:38 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
def test_out_of_range_is_ignored(self):
# All that datetime gives us is a ValueError or OverflowError when
# something is out of range (i.e. less than datetime.datetime.min or
# greater than datetime.datetime.max). Unfortunately, we can't
# distinguish between a date being too old and a date being too new,
# so the best we can do is ignore such headers.
max_date_list = list(datetime.datetime.max.timetuple())
max_date_list[0] += 1 # bump up the year
too_big_date_header = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.struct_time(max_date_list))
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': too_big_date_header})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, 'hi')
if __name__ == '__main__':
unittest.main()
|
|
"""
High Level Operations for every infrastructure
"""
from common.exceptions import ProfileNotFound
from common.exceptions import ResourcesNotAvailable
from common.exceptions import PlatformDoesNotExist
from common.sessions import Sessions
from common.platform import Platform
from urllib2 import URLError
from threading import Thread
import uuid
import suds
import time
import logging
class CallBackThread(Thread):
def __init__(self, platform, core_session_id, core_ip, implementor, logger):
Thread.__init__(self)
self.platform = platform
self.core_session_id = core_session_id
self.core_ip = core_ip
self.implementor = implementor
self.logger = logger
def run(self):
"""
Runs inside a thread and polls the deployment status.
Once the deployment finishes, it calls the core back with the endpoint
(or "FAILED"), using self.core_session_id and self.core_ip.
"""
try:
client = suds.client.Client('http://' + self.core_ip + ':8080/axis2/services/CoreServices?wsdl', cache=None)
except URLError as u:
self.logger.debug("Unavailable CallBack Server...")
return
while (self.implementor.allocation_status(self.platform) == "BUILDING"):
self.logger.debug('Waiting to reply for core_session_id %s to %s.', self.core_session_id, self.core_ip)
time.sleep(15)
if self.implementor.allocation_status(self.platform) == "CREATED":
client.service.deployCallback(self.core_session_id, self.platform.get_endpoint())
else:
client.service.deployCallback(self.core_session_id, "FAILED")
return
class InfrastructureAbstraction:
def __init__ (self, implementor, profiles):
self.implementor = implementor
self.profiles = profiles
self.sessions = Sessions()
# Configure Logging
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
def get_profile(self, profile_id):
for profile in self.profiles.keys():
if profile_id == self.profiles[profile]['id']:
return self.profiles[profile]
raise ProfileNotFound(profile_id, self.profiles)
def check_platform_availability(self, profile_id):
"""
Not all profiles are readily available.
For example, a cluster profile may have to wait in a queue for
instantiation. For cloud profiles, this method is not used.
Should it live only in the cluster abstractions?
Returns True or False. It will not make reservations, at least for now.
profile_id -- the desired profile.
"""
profile = self.get_profile(profile_id)
available = self.implementor.verify_profile_availability(profile)
return available
def create_platform(self, profile_id):
"""
A platform is a container+infrastructure. This method should allocate
the resources and deploy the container.
profile_id -- the desired profile.
"""
profile = self.get_profile(profile_id)
platform_id = uuid.uuid4()
platform = Platform(id = platform_id, profile_id = profile_id,
allocation_id = 0, endpoint = "NO_ENDPOINT",
status = "BUILDING" )
self.sessions.add_platform(platform)
try:
self.implementor.allocate_resources(platform, profile)
except ResourcesNotAvailable as e:
return 0
return platform_id
def create_platform_callback(self, profile_id, core_session_id, remote_ip):
"""
A platform is a container+infrastructure. This method should allocate
the resources and deploy the container.
Input: profile_id, core_session_id, remote_ip for callback
Output: the platform_id, which is the same of core_session_id
"""
self.logger.debug('profile_id: %s, core_session_id: %s, remote_ip: %s', profile_id, core_session_id, remote_ip)
profile = self.get_profile(int(profile_id))
platform = Platform(id = core_session_id, profile_id = profile_id,
allocation_id = 0, endpoint = "NO_ENDPOINT",
status = "BUILDING" )
self.sessions.add_platform(platform)
try:
self.implementor.allocate_resources(platform, profile)
except ResourcesNotAvailable as e:
return 0
# Thread invocation
callback_thread = CallBackThread(platform, core_session_id, remote_ip, self.implementor, self.logger)
callback_thread.start()
return core_session_id
def get_available_platforms(self):
"""
Returns a dictionary mapping platform_id -> profile_id for all
instantiated (non-destroyed) platforms.
"""
platforms_ids = self.sessions.get_platform_list()
return dict( (id, self.sessions.get_platform(id).get_profile_id()) for id in platforms_ids if self.sessions.get_platform(id).get_status() != "DESTROYED" )
def get_available_platforms_endpoints(self):
"""
Returns a dictionary mapping platform_id -> endpoint for all
instantiated (non-destroyed) platforms.
"""
platforms_ids = self.sessions.get_platform_list()
return dict( (id, self.sessions.get_platform(id).get_endpoint()) for id in platforms_ids if self.sessions.get_platform(id).get_status() != "DESTROYED" )
def platform_status(self, platform_id):
"""
The platform status will be BUILDING, CREATED, DESTROYED, FAILED
platform_id -- the id of the platform
"""
_id = platform_id
try :
platform_id = uuid.UUID(platform_id)
except ValueError as v:
self.logger.debug("Platform ID is not UUID, created by callback...")
platform_id = _id
except AttributeError as v:
self.logger.debug("Platform ID already is UUID")
platform_id = _id
platform = self.sessions.get_platform(platform_id)
return self.implementor.allocation_status(platform)
def get_platform_endpoint(self, platform_id):
"""
Returns the endpoint.
platform_id -- the id of the platform
"""
_id = platform_id
try :
platform_id = uuid.UUID(platform_id)
except AttributeError as v:
self.logger.debug("Platform ID already is UUID")
platform_id = _id
platform = self.sessions.get_platform(platform_id)
return platform.get_endpoint()
def destroy_platform(self, platform_id):
"""
Destroy the platform.
platform_id -- the id of the platform
"""
_id = platform_id
try :
platform_id = uuid.UUID(platform_id)
except ValueError as v:
self.logger.debug("Platform ID is not UUID, created by callback...")
platform_id = _id
except AttributeError as v:
self.logger.debug("Platform ID already is UUID")
platform_id = _id
try :
platform = self.sessions.get_platform(platform_id)
except PlatformDoesNotExist as e:
self.logger.info("Platform does not exist.")
return 0
deallocation_status = self.implementor.deallocate_resources(platform)
self.sessions.remove_platform(platform_id)
return deallocation_status
def destroy_platform_by_endpoint(self, endpoint):
"""
Destroy the platform with the corresponding endpoint.
Input: the endpoint
Output: the status of the deallocation
"""
self.logger.info("Destroying platform with endpoint: " + endpoint);
platform_id = None
for id in self.sessions.get_platform_list():
_endpoint = self.sessions.get_platform(id).get_endpoint()
if (endpoint == _endpoint):
platform_id = id
if platform_id is None:
self.logger.info("Platform not found for endpoint: " + endpoint)
return "NOTFOUND"
return self.destroy_platform(platform_id)
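# Illustrative usage sketch (hypothetical names, not part of this module): any
# implementor only needs the four methods called above, and 'profiles' maps
# profile names to dicts carrying at least an 'id' key.
#
#     class EchoImplementor(object):
#         def verify_profile_availability(self, profile):
#             return True
#         def allocate_resources(self, platform, profile):
#             pass
#         def allocation_status(self, platform):
#             return "CREATED"
#         def deallocate_resources(self, platform):
#             return "DESTROYED"
#
#     abstraction = InfrastructureAbstraction(EchoImplementor(), {'small': {'id': 1}})
#     platform_id = abstraction.create_platform(1)
#     status = abstraction.platform_status(str(platform_id))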
|
|
"""
Pythia's Verifiable, Partially-Oblivious Pseudorandom Function (POP) protocol
constructed using the BN-256 pairing-based curves provided by the RELIC library.
Also includes serialization and encoding routines for elements that are commonly
transmitted.
"""
import base64

from pbc import *
def eval(w,t,x,msk,s):
"""
Pythia server-side computation of intermediate PRF output.
@w: ensemble key selector (e.g. webserver ID)
@t: tweak (e.g. user ID)
@x: blinded message (element of G1)
@msk: Pythia server's master secret key
@s: state value from Pythia server's key table
@returns: (y, kw, tTilde)
where: y: intermediate result
kw: secret key bound to w (needed for proof)
tTilde: hashed tweak (needed for proof)
"""
kw = genKw(w,msk,s)
# TODO: Return cached values for precomputation
tTilde = hashG2(t)
y = pair(x*kw, tTilde)
return y,kw,tTilde
def genKw(w,msk,z):
"""
Generates key Kw using key-selector @w, master secret key @msk, and
table value @z.
@returns Kw as a BigInt.
"""
# Hash inputs into a string of bytes
TAG_KW = "TAG_PYTHIA_KW"
b = hmac(TAG_KW, msk, z + w)
# Convert the string into a long value (no larger than the order of Gt),
# then return a BigInt value.
return BigInt(longFromString(b) % long(orderGt()))
def prove(x,tTilde,kw,y):
return proveGt(x,tTilde,kw,y)
def verify(x, tTilde, y, pi, errorOnFail=True):
return verifyGt(x, tTilde, y, pi, errorOnFail)
@profile
def proveGt(x,tTilde,kw,y):
"""
Generate a zero-knowledge proof that DL(g^kw) == DL(e(x,tTilde)^kw) where
g and e(..) are in Gt.
@return pi = (p,c,u)
"""
# Verify types
assertType(x, G1Element)
assertType(tTilde, G2Element)
# Compute the proof.
beta = pair(x,tTilde)
g = generatorGt()
p = g**kw
v = randomZ(orderGt())
t1 = g**v
t2 = beta**v
c = hashZ(g,p,beta,y,t1,t2)
u = (v- (c*kw)) % orderGt()
return (p,c,u)
@profile
def proveG1(x,tTilde,kw,y):
"""
Generate a zero-knowledge proof that DL(Q*kw) == DL(e(x,tTilde)^kw) where
<Q> = G1.
"""
# Verify types
assertType(x, G1Element)
assertType(tTilde, G2Element)
# Compute the proof.
beta = pair(x,tTilde)
Q = generatorG1()
p = Q*kw
v = randomZ(orderGt())
t1 = Q*v
t2 = beta**v
t1.normalize()
c = hashZ(Q,p,beta,y,t1,t2)
u = (v-(c*kw)) % orderGt()
return (p,c,u)
@profile
def verifyG1(x, tTilde, y, pi, errorOnFail=True):
"""
Verifies a zero-knowledge proof where p is in G1.
@errorOnFail: Raise an exception if the proof does not hold.
"""
# Unpack the proof
p,c,u = pi
# Verify types
assertType(x, G1Element)
assertType(tTilde, G2Element)
assertType(y, GtElement)
assertType(p, G1Element)
# TODO: beta can be pre-computed while waiting for a server response.
Q = generatorG1()
beta = pair(x,tTilde)
# Recompute c'
t1 = Q*u + p*c
t2 = beta**u * y**c
t1.normalize()
cPrime = hashZ(Q,p,beta,y,t1,t2)
# Check computed @c' against server's value @c
if cPrime == c:
return True
if errorOnFail:
raise Exception("zero-knowledge proof failed verification.")
else:
return False
@profile
def verifyGt(x, tTilde, y, pi, errorOnFail=True):
"""
Verifies a zero-knowledge proof.
@x: Blinded message, G1Element, x = HG1(m)*r
@tTilde: hashed tweak, G2Element, t~ = HG2(t)
@y: server response (intermediate result), GtElement
@pi: pi = (p, c, u), zero-knowledge proof from server,
p = g^kw, GtElement; c,u integer values
@errorOnFail: Raise an exception if the proof does not hold.
"""
# Unpack the proof
p,c,u = pi
# Verify types
assertType(x, G1Element)
assertType(tTilde, G2Element)
assertType(y, GtElement)
assertType(p, GtElement)
# TODO: This can be pre-computed while waiting for a server response.
g = generatorGt()
beta = pair(x,tTilde)
# Recompute c'
t1 = g**u * p**c
t2 = beta**u * y**c
cPrime = hashZ(g,p,beta,y,t1,t2)
# Check computed @c' against server's value @c
if cPrime == c:
return True
if errorOnFail:
raise Exception("zero-knowledge proof failed verification.")
else:
return False
def blind(m, hashfunc=hashG1):
"""
Blinds an arbitrary string or byte array @m using an ephemeral key @r
that can be used to deblind. Computes: x = H(m)^r
@returns (1/r,x)
"""
# Find r with a suitable inverse in Gt
rInv = None
while not rInv:
r = randomZ()
rInv = inverse(r, orderGt())
return rInv, hashfunc(m) * r
def deblind(rInv,y):
"""
Removes the blinding from (intermediate result) @y in Gt using @rInv, the
inverse of the ephemeral key produced by blind().
"""
# Raising @y to the inverse of the ephemeral key removes the blinding.
return y ** rInv
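# A minimal sketch (illustrative only) of one complete protocol round trip,
# assuming the pbc wrappers imported above behave as used by the functions in
# this module. The key selector, tweak, message and server-side secrets below
# are placeholder strings, not real deployment values.
def example_round_trip(w='example-ensemble', t='example-tweak',
                       m='example-message', msk='example-msk', s='example-state'):
    rInv, x = blind(m)                       # client: blind the message
    y, kw, tTilde = eval(w, t, x, msk, s)    # server: intermediate PRF output
    pi = prove(x, tTilde, kw, y)             # server: proof of correctness
    verify(x, tTilde, y, pi)                 # client: check the proof
    return deblind(rInv, y)                  # client: final PRF value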
def _wrap(x, serializeFunc, encodeFunc=base64.urlsafe_b64encode, compress=True):
"""
Wraps an element @x by serializing and then encoding the resulting bytes.
"""
return encodeFunc(serializeFunc(x, compress))
def _unwrap(x, deserializeFunc, decodeFunc=base64.urlsafe_b64decode, compress=True):
"""
Unwraps an element @x by decoding and then deserializing
"""
return deserializeFunc(decodeFunc(x), compress)
def _wrapG1(x):
return _wrap(x, serializeG1)
def _unwrapG1(x):
return _unwrap(x, deserializeG1)
def _wrapGt(x):
return _wrap(x, serializeGt)
def _unwrapGt(x):
return _unwrap(x, deserializeGt)
wrapX = _wrapG1
unwrapX = _unwrapG1
wrapY = _wrapGt
unwrapY = _unwrapGt
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json
import datetime
import decimal
import mimetypes
import os
import frappe
from frappe import _
import frappe.model.document
import frappe.utils
import frappe.sessions
import werkzeug.utils
from werkzeug.local import LocalProxy
from werkzeug.wsgi import wrap_file
from werkzeug.wrappers import Response
from werkzeug.exceptions import NotFound, Forbidden
from frappe.website.render import render
from frappe.utils import cint
from six import text_type
from six.moves.urllib.parse import quote
from frappe.core.doctype.access_log.access_log import make_access_log
def report_error(status_code):
'''Build error. Show traceback in developer mode'''
if (cint(frappe.db.get_system_setting('allow_error_traceback'))
and (status_code!=404 or frappe.conf.logging)
and not frappe.local.flags.disable_traceback):
frappe.errprint(frappe.utils.get_traceback())
response = build_response("json")
response.status_code = status_code
return response
def build_response(response_type=None):
if "docs" in frappe.local.response and not frappe.local.response.docs:
del frappe.local.response["docs"]
response_type_map = {
'csv': as_csv,
'txt': as_txt,
'download': as_raw,
'json': as_json,
'pdf': as_pdf,
'page': as_page,
'redirect': redirect,
'binary': as_binary
}
return response_type_map[frappe.response.get('type') or response_type]()
def as_csv():
response = Response()
response.mimetype = 'text/csv'
response.charset = 'utf-8'
response.headers["Content-Disposition"] = ("attachment; filename=\"%s.csv\"" % frappe.response['doctype'].replace(' ', '_')).encode("utf-8")
response.data = frappe.response['result']
return response
def as_txt():
response = Response()
response.mimetype = 'text'
response.charset = 'utf-8'
response.headers["Content-Disposition"] = ("attachment; filename=\"%s.txt\"" % frappe.response['doctype'].replace(' ', '_')).encode("utf-8")
response.data = frappe.response['result']
return response
def as_raw():
response = Response()
response.mimetype = frappe.response.get("content_type") or mimetypes.guess_type(frappe.response['filename'])[0] or "application/unknown"
response.headers["Content-Disposition"] = ("attachment; filename=\"%s\"" % frappe.response['filename'].replace(' ', '_')).encode("utf-8")
response.data = frappe.response['filecontent']
return response
def as_json():
make_logs()
response = Response()
if frappe.local.response.http_status_code:
response.status_code = frappe.local.response['http_status_code']
del frappe.local.response['http_status_code']
response.mimetype = 'application/json'
response.charset = 'utf-8'
response.data = json.dumps(frappe.local.response, default=json_handler, separators=(',',':'))
return response
def as_pdf():
response = Response()
response.mimetype = "application/pdf"
encoded_filename = quote(frappe.response['filename'].replace(' ', '_'))
response.headers["Content-Disposition"] = ("filename=\"%s\"" % frappe.response['filename'].replace(' ', '_') + ";filename*=utf-8''%s" % encoded_filename).encode("utf-8")
response.data = frappe.response['filecontent']
return response
def as_binary():
response = Response()
response.mimetype = 'application/octet-stream'
response.headers["Content-Disposition"] = ("filename=\"%s\"" % frappe.response['filename'].replace(' ', '_')).encode("utf-8")
response.data = frappe.response['filecontent']
return response
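# Illustrative only: a whitelisted server-side method typically selects one of
# the builders above by populating frappe.response before the response is
# built, for example:
#
#     frappe.response['filename'] = 'example.bin'        # hypothetical values
#     frappe.response['filecontent'] = file_bytes
#     frappe.response['type'] = 'download'               # routed to as_raw()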
def make_logs(response = None):
"""make strings for msgprint and errprint"""
if not response:
response = frappe.local.response
if frappe.error_log:
response['exc'] = json.dumps([frappe.utils.cstr(d["exc"]) for d in frappe.local.error_log])
if frappe.local.message_log:
response['_server_messages'] = json.dumps([frappe.utils.cstr(d) for
d in frappe.local.message_log])
if frappe.debug_log and frappe.conf.get("logging") or False:
response['_debug_messages'] = json.dumps(frappe.local.debug_log)
if frappe.flags.error_message:
response['_error_message'] = frappe.flags.error_message
def json_handler(obj):
"""serialize non-serializable data for json"""
# serialize date
import collections
if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)):
return text_type(obj)
elif isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(obj, LocalProxy):
return text_type(obj)
elif isinstance(obj, frappe.model.document.BaseDocument):
doc = obj.as_dict(no_nulls=True)
return doc
elif isinstance(obj, collections.Iterable):
return list(obj)
elif type(obj)==type or isinstance(obj, Exception):
return repr(obj)
else:
raise TypeError("""Object of type %s with value of %s is not JSON serializable""" % \
(type(obj), repr(obj)))
def as_page():
"""print web page"""
return render(frappe.response['route'], http_status_code=frappe.response.get("http_status_code"))
def redirect():
return werkzeug.utils.redirect(frappe.response.location)
def download_backup(path):
try:
frappe.only_for(("System Manager", "Administrator"))
make_access_log(report_name='Backup')
except frappe.PermissionError:
raise Forbidden(_("You need to be logged in and have System Manager Role to be able to access backups."))
return send_private_file(path)
def download_private_file(path):
"""Checks permissions and sends back private file"""
files = frappe.db.get_all('File', {'file_url': path})
can_access = False
# this file might be attached to multiple documents
# if the file is accessible from any one of those documents
# then it should be downloadable
for f in files:
_file = frappe.get_doc("File", f)
can_access = _file.is_downloadable()
if can_access:
make_access_log(doctype='File', document=_file.name, file_type=os.path.splitext(path)[-1][1:])
break
if not can_access:
raise Forbidden(_("You don't have permission to access this file"))
return send_private_file(path.split("/private", 1)[1])
def send_private_file(path):
path = os.path.join(frappe.local.conf.get('private_path', 'private'), path.strip("/"))
filename = os.path.basename(path)
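# If the reverse proxy advertises X-Accel-Redirect support, hand the file back
# to it (served from an internal '/protected/' location) instead of streaming
# the bytes through Python.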
if frappe.local.request.headers.get('X-Use-X-Accel-Redirect'):
path = '/protected/' + path
response = Response()
response.headers['X-Accel-Redirect'] = quote(frappe.utils.encode(path))
else:
filepath = frappe.utils.get_site_path(path)
try:
f = open(filepath, 'rb')
except IOError:
raise NotFound
response = Response(wrap_file(frappe.local.request.environ, f), direct_passthrough=True)
# no need for content disposition and force download. let browser handle its opening.
# Except for those that can be injected with scripts.
extension = os.path.splitext(path)[1]
blacklist = ['.svg', '.html', '.htm', '.xml']
if extension.lower() in blacklist:
response.headers.add('Content-Disposition', 'attachment', filename=filename.encode("utf-8"))
response.mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
return response
def handle_session_stopped():
frappe.respond_as_web_page(_("Updating"),
_("Your system is being updated. Please refresh again after a few moments."),
http_status_code=503, indicator_color='orange', fullpage = True, primary_action=None)
return frappe.website.render.render("message", http_status_code=503)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.backup.models import Backup
import trove.common.apischema as apischema
from trove.common.auth import admin_context
from trove.common import exception
from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.mgmt.instances import models
from trove.extensions.mgmt.instances import views
from trove.extensions.mgmt.instances.views import DiagnosticsView
from trove.extensions.mgmt.instances.views import HwInfoView
from trove.extensions.mysql import models as mysql_models
from trove.instance import models as instance_models
from trove.instance.service import InstanceController
LOG = logging.getLogger(__name__)
class MgmtInstanceController(InstanceController):
"""Controller for instance functionality."""
schemas = apischema.mgmt_instance
@classmethod
def get_action_schema(cls, body, action_schema):
action_type = body.keys()[0]
return action_schema.get(action_type, {})
@admin_context
def index(self, req, tenant_id, detailed=False):
"""Return all instances."""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Indexing a database instance for tenant '%s'") % tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
deleted = None
deleted_q = req.GET.get('deleted', '').lower()
if deleted_q in ['true']:
deleted = True
elif deleted_q in ['false']:
deleted = False
clustered_q = req.GET.get('include_clustered', '').lower()
include_clustered = clustered_q == 'true'
try:
instances = models.load_mgmt_instances(
context, deleted=deleted, include_clustered=include_clustered)
except nova_exceptions.ClientException as e:
LOG.error(e)
return wsgi.Result(str(e), 403)
view_cls = views.MgmtInstancesView
return wsgi.Result(view_cls(instances, req=req).data(), 200)
@admin_context
def show(self, req, tenant_id, id):
"""Return a single instance."""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Showing a database instance for tenant '%s'") % tenant_id)
LOG.info(_("id : '%s'\n\n") % id)
context = req.environ[wsgi.CONTEXT_KEY]
deleted_q = req.GET.get('deleted', '').lower()
include_deleted = deleted_q == 'true'
server = models.DetailedMgmtInstance.load(context, id,
include_deleted)
root_history = mysql_models.RootHistory.load(context=context,
instance_id=id)
return wsgi.Result(
views.MgmtInstanceDetailView(
server,
req=req,
root_history=root_history).data(),
200)
@admin_context
def action(self, req, body, tenant_id, id):
LOG.info("req : '%s'\n\n" % req)
LOG.info("Committing an ACTION against instance %s for tenant '%s'"
% (id, tenant_id))
if not body:
raise exception.BadRequest(_("Invalid request body."))
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.MgmtInstance.load(context=context, id=id)
_actions = {
'stop': self._action_stop,
'reboot': self._action_reboot,
'migrate': self._action_migrate,
'reset-task-status': self._action_reset_task_status
}
selected_action = None
for key in body:
if key in _actions:
if selected_action is not None:
msg = _("Only one action can be specified per request.")
raise exception.BadRequest(msg)
selected_action = _actions[key]
else:
msg = _("Invalid instance action: %s") % key
raise exception.BadRequest(msg)
if selected_action:
return selected_action(context, instance, body)
else:
raise exception.BadRequest(_("Invalid request body."))
def _action_stop(self, context, instance, body):
LOG.debug("Stopping MySQL on instance %s." % instance.id)
instance.stop_db()
return wsgi.Result(None, 202)
def _action_reboot(self, context, instance, body):
LOG.debug("Rebooting instance %s." % instance.id)
instance.reboot()
return wsgi.Result(None, 202)
def _action_migrate(self, context, instance, body):
LOG.debug("Migrating instance %s." % instance.id)
LOG.debug("body['migrate']= %s" % body['migrate'])
host = body['migrate'].get('host', None)
instance.migrate(host)
return wsgi.Result(None, 202)
def _action_reset_task_status(self, context, instance, body):
LOG.debug("Setting Task-Status to NONE on instance %s." %
instance.id)
instance.reset_task_status()
LOG.debug("Failing backups for instance %s." % instance.id)
Backup.fail_for_instance(instance.id)
return wsgi.Result(None, 202)
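# Illustrative request bodies dispatched by action() above (the host value
# below is a made-up example; 'host' is optional):
#   {"stop": {}}
#   {"reboot": {}}
#   {"migrate": {"host": "some-compute-host"}}
#   {"reset-task-status": {}}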
@admin_context
def root(self, req, tenant_id, id):
"""Return the date and time root was enabled on an instance,
if ever.
"""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Showing root history for tenant '%s'") % tenant_id)
LOG.info(_("id : '%s'\n\n") % id)
context = req.environ[wsgi.CONTEXT_KEY]
try:
instance_models.Instance.load(context=context, id=id)
except exception.TroveError as e:
LOG.error(e)
return wsgi.Result(str(e), 404)
rhv = views.RootHistoryView(id)
reh = mysql_models.RootHistory.load(context=context, instance_id=id)
if reh:
rhv = views.RootHistoryView(reh.id, enabled=reh.created,
user_id=reh.user)
return wsgi.Result(rhv.data(), 200)
@admin_context
def hwinfo(self, req, tenant_id, id):
"""Return a single instance hardware info."""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Showing hardware info for instance '%s'") % id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.MgmtInstance.load(context=context, id=id)
hwinfo = instance.get_hwinfo()
return wsgi.Result(HwInfoView(id, hwinfo).data(), 200)
@admin_context
def diagnostics(self, req, tenant_id, id):
"""Return a single instance diagnostics."""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Showing a instance diagnostics for instance '%s'") % id)
LOG.info(_("id : '%s'\n\n") % id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.MgmtInstance.load(context=context, id=id)
diagnostics = instance.get_diagnostics()
return wsgi.Result(DiagnosticsView(id, diagnostics).data(), 200)
@admin_context
def rpc_ping(self, req, tenant_id, id):
"""Checks if instance is reachable via rpc."""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("id : '%s'\n\n") % id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.MgmtInstance.load(context=context, id=id)
instance.rpc_ping()
return wsgi.Result(None, 204)
|
|
# orm/loading.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from .. import util
from . import attributes, exc as orm_exc, state as statelib
from .interfaces import EXT_CONTINUE
from ..sql import util as sql_util
from .util import _none_set, state_str
from .. import exc as sa_exc
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
session = query.session
context.runid = _new_runid()
filter_fns = [ent.filter_fn
for ent in query._entities]
filtered = id in filter_fns
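# (full ORM entities use the builtin id as their filter_fn, so "filtered" is
# True when at least one entity requires duplicate rows to be collapsed by
# object identity via util.unique_list() below)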
single_entity = len(query._entities) == 1 and \
query._entities[0].supports_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(fn(x) for x, fn in zip(row, filter_fns))
custom_rows = single_entity and \
query._entities[0].custom_rows
try:
(process, labels) = \
list(zip(*[
query_entity.row_processor(query,
context, custom_rows)
for query_entity in query._entities
]))
while True:
context.progress = {}
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if custom_rows:
rows = []
for row in fetch:
process[0](row, rows)
elif single_entity:
rows = [process[0](row, None) for row in fetch]
else:
rows = [util.KeyedTuple([proc(row, None) for proc in process],
labels) for row in fetch]
if filtered:
rows = util.unique_list(rows, filter_fn)
if context.refresh_state and query._only_load_props \
and context.refresh_state in context.progress:
context.refresh_state._commit(
context.refresh_state.dict, query._only_load_props)
context.progress.pop(context.refresh_state)
statelib.InstanceState._commit_all_states(
list(context.progress.items()),
session.identity_map
)
for state, (dict_, attrs) in context.partials.items():
state._commit(dict_, attrs)
for row in rows:
yield row
if not query._yield_per:
break
except Exception as err:
cursor.close()
util.raise_from_cause(err)
@util.dependencies("sqlalchemy.orm.query")
def merge_result(querylib, query, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session."""
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive={})
for instance in iterator]
else:
result = list(iterator)
else:
mapped_entities = [i for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)]
result = []
keys = [ent._label_name for ent in query._entities]
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load, _recursive={})
result.append(util.KeyedTuple(newrow, keys))
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(session, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(query, key,
refresh_state=None, lockmode=None,
only_load_props=None):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
else:
ident = None
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if ident is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_get_clause = sql_util.adapt_criterion_to_null(
_get_clause, nones)
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
q._params = params
if lockmode is not None:
version_check = True
q = q.with_lockmode(lockmode)
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
q._get_options(
populate_existing=bool(refresh_state),
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state)
q._order_by = None
try:
return q.one()
except orm_exc.NoResultFound:
return None
def instance_processor(mapper, context, path, adapter,
polymorphic_from=None,
only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
pk_cols = mapper.primary_key
if polymorphic_from or refresh_state:
polymorphic_on = None
else:
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
polymorphic_instances = util.PopulateDict(
_configure_subclass_mapper(
mapper,
context, path, adapter)
)
version_id_col = mapper.version_id_col
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
if polymorphic_on is not None:
polymorphic_on = adapter.columns[polymorphic_on]
if version_id_col is not None:
version_id_col = adapter.columns[version_id_col]
identity_class = mapper._identity_class
new_populators = []
existing_populators = []
eager_populators = []
load_path = context.query._current_path + path \
if context.query._current_path.path \
else path
def populate_state(state, dict_, row, isnew, only_load_props):
if isnew:
if context.propagate_options:
state.load_options = context.propagate_options
if state.load_options:
state.load_path = load_path
if not new_populators:
_populators(mapper, context, path, row, adapter,
new_populators,
existing_populators,
eager_populators
)
if isnew:
populators = new_populators
else:
populators = existing_populators
if only_load_props is None:
for key, populator in populators:
populator(state, dict_, row)
elif only_load_props:
for key, populator in populators:
if key in only_load_props:
populator(state, dict_, row)
session_identity_map = context.session.identity_map
listeners = mapper.dispatch
# legacy events - I'd very much like to yank these totally
translate_row = listeners.translate_row or None
create_instance = listeners.create_instance or None
populate_instance = listeners.populate_instance or None
append_result = listeners.append_result or None
####
populate_existing = context.populate_existing or mapper.always_refresh
invoke_all_eagers = context.invoke_all_eagers
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.issubset
def _instance(row, result):
if not new_populators and invoke_all_eagers:
_populators(mapper, context, path, row, adapter,
new_populators,
existing_populators,
eager_populators
)
if translate_row:
for fn in translate_row:
ret = fn(mapper, context, row)
if ret is not EXT_CONTINUE:
row = ret
break
if polymorphic_on is not None:
discriminator = row[polymorphic_on]
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row, result)
# determine identity key
if refresh_state:
identitykey = refresh_state.key
if identitykey is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
identitykey = mapper._identity_key_from_state(refresh_state)
else:
identitykey = (
identity_class,
tuple([row[column] for column in pk_cols])
)
instance = session_identity_map.get(identitykey)
if instance is not None:
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
isnew = state.runid != context.runid
currentload = not isnew
loaded_instance = False
if not currentload and \
version_id_col is not None and \
context.version_check and \
mapper._get_state_attr_by_column(
state,
dict_,
mapper.version_id_col) != \
row[version_id_col]:
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (state_str(state),
mapper._get_state_attr_by_column(
state, dict_,
mapper.version_id_col),
row[version_id_col]))
elif refresh_state:
# out of band refresh_state detected (i.e. its not in the
# session.identity_map) honor it anyway. this can happen
# if a _get() occurs within save_obj(), such as
# when eager_defaults is True.
state = refresh_state
instance = state.obj()
dict_ = attributes.instance_dict(instance)
isnew = state.runid != context.runid
currentload = True
loaded_instance = False
else:
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
if create_instance:
for fn in create_instance:
instance = fn(mapper, context,
row, mapper.class_)
if instance is not EXT_CONTINUE:
manager = attributes.manager_of_class(
instance.__class__)
# TODO: if manager is None, raise a friendly error
# about returning instances of unmapped types
manager.setup_instance(instance)
break
else:
instance = mapper.class_manager.new_instance()
else:
instance = mapper.class_manager.new_instance()
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
state.key = identitykey
# attach instance to session.
state.session_id = context.session.hash_key
session_identity_map.add(state)
if currentload or populate_existing:
# state is being fully loaded, so populate.
# add to the "context.progress" collection.
if isnew:
state.runid = context.runid
context.progress[state] = dict_
if populate_instance:
for fn in populate_instance:
ret = fn(mapper, context, row, state,
only_load_props=only_load_props,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
else:
populate_state(state, dict_, row, isnew, only_load_props)
else:
populate_state(state, dict_, row, isnew, only_load_props)
if loaded_instance:
state.manager.dispatch.load(state, context)
elif isnew:
state.manager.dispatch.refresh(state, context, only_load_props)
elif state in context.partials or state.unloaded or eager_populators:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
if state in context.partials:
isnew = False
(d_, attrs) = context.partials[state]
else:
isnew = True
attrs = state.unloaded
context.partials[state] = (dict_, attrs)
if populate_instance:
for fn in populate_instance:
ret = fn(mapper, context, row, state,
only_load_props=attrs,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
else:
populate_state(state, dict_, row, isnew, attrs)
else:
populate_state(state, dict_, row, isnew, attrs)
for key, pop in eager_populators:
if key not in state.unloaded:
pop(state, dict_, row)
if isnew:
state.manager.dispatch.refresh(state, context, attrs)
if result is not None:
if append_result:
for fn in append_result:
if fn(mapper, context, row, state,
result, instancekey=identitykey,
isnew=isnew) is not EXT_CONTINUE:
break
else:
result.append(instance)
else:
result.append(instance)
return instance
return _instance
def _populators(mapper, context, path, row, adapter,
new_populators, existing_populators, eager_populators):
"""Produce a collection of attribute level row processor
callables."""
delayed_populators = []
pops = (new_populators, existing_populators, delayed_populators,
eager_populators)
for prop in mapper._props.values():
for i, pop in enumerate(prop.create_row_processor(
context,
path,
mapper, row, adapter)):
if pop is not None:
pops[i].append((prop.key, pop))
if delayed_populators:
new_populators.extend(delayed_populators)
def _configure_subclass_mapper(mapper, context, path, adapter):
"""Produce a mapper level row processor callable factory for mappers
inheriting this one."""
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" %
discriminator)
if sub_mapper is mapper:
return None
return instance_processor(
sub_mapper,
context,
path,
adapter,
polymorphic_from=mapper)
return configure_subclass_mapper
def load_scalar_attributes(mapper, state, attribute_names):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" %
(state_str(state)))
has_key = bool(state.key)
result = False
if mapper.inherits and not mapper.concrete:
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
result = load_on_ident(
session.query(mapper).from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [mapper._columntoproperty[col].key
for col in mapper.primary_key]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state))
identity_key = mapper._identity_key_from_state(state)
if (_none_set.issubset(identity_key) and
not mapper.allow_partial_pks) or \
_none_set.issuperset(identity_key):
util.warn("Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either)."
% state_str(state))
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
|
|
# SPDX-License-Identifier: ISC
# -*- coding: utf-8 -*-
"""
Overview
========
pymenuconfig is a small and simple frontend to Kconfiglib that's written
entirely in Python using Tkinter as its GUI toolkit.
Motivation
==========
Kconfig is a nice and powerful framework for build-time configuration and lots
of projects already benefit from using it. Kconfiglib allows one to harness the
power of Kconfig from scripts written in pure Python, without having to build
the Linux kernel tools written in C (which can be quite tedious on anything
that's not *nix). The aim of this project is to implement a simple and small
Kconfiglib GUI frontend that runs on as many systems as possible.
The Tkinter GUI toolkit is a natural choice where portability matters, as it's
part of the Python standard library and is available in virtually every CPython
installation.
User interface
==============
I've tried to replicate the look and feel of the Linux kernel 'menuconfig' tool
that many users are used to, including keyboard-oriented control and a textual
representation of menus rendered in a fixed-width font.
Usage
=====
The pymenuconfig module is executable and parses command-line args, so the
simplest way to run menuconfig is to execute the script directly:
python pymenuconfig.py --kconfig Kconfig
As with most command-line tools, the list of options can be obtained with '--help':
python pymenuconfig.py --help
If installed with setuptools, one can run it like this:
python -m pymenuconfig --kconfig Kconfig
In case you're making a wrapper around menuconfig, you can either call main():
import pymenuconfig
pymenuconfig.main(['--kconfig', 'Kconfig'])
Or import MenuConfig class, instantiate it and manually run Tkinter's mainloop:
import tkinter
import kconfiglib
from pymenuconfig import MenuConfig
kconfig = kconfiglib.Kconfig()
mconf = MenuConfig(kconfig)
tkinter.mainloop()
"""
from __future__ import print_function
import os
import sys
import argparse
import kconfiglib
# Tk is imported differently depending on python major version
if sys.version_info[0] < 3:
import Tkinter as tk
import tkFont as font
import tkFileDialog as filedialog
import tkMessageBox as messagebox
else:
import tkinter as tk
from tkinter import font
from tkinter import filedialog
from tkinter import messagebox
class ListEntry(object):
"""
Represents a visible menu node and holds all information related to displaying
that node in a Listbox.
Instances of this class also handle all interaction with the main window.
A node is displayed as a single line of text:
PREFIX INDENT BODY POSTFIX
- The PREFIX is always 3 characters or more and can take following values:
' ' comment, menu, bool choice, etc.
Inside menus:
'< >' bool symbol has value 'n'
'<*>' bool symbol has value 'y'
'[ ]' tristate symbol has value 'n'
'[M]' tristate symbol has value 'm'
'[*]' tristate symbol has value 'y'
'- -' symbol has value 'n' that's not editable
'-M-' symbol has value 'm' that's not editable
'-*-' symbol has value 'y' that's not editable
'(M)' tristate choice has value 'm'
'(*)' tristate choice has value 'y'
'(some value)' value of non-bool/tristate symbols
Inside choices:
'( )' symbol has value 'n'
'(M)' symbol has value 'm'
'(*)' symbol has value 'y'
- INDENT is a sequence of space characters. It's used in implicit menus, and
adds 2 spaces for each nesting level
- BODY is a menu node prompt. '***' is added if node is a comment
- POSTFIX adds '(NEW)', '--->' and selected choice symbol where applicable
Attributes:
node:
MenuNode instance this ListEntry is created for.
visible:
Whether entry should be shown in main window.
text:
String to display in a main window's Listbox.
refresh():
Updates .visible and .text attribute values.
set_tristate_value():
Set value for bool/tristate symbols, value should be one of 0,1,2 or None.
Usually it's called when user presses 'y', 'n', 'm' key.
set_str_value():
Set value for non-bool/tristate symbols, value is a string. Usually called
with a value returned by one of MenuConfig.ask_for_* methods.
toggle():
Toggle bool/tristate symbol value. Called when '<Space>' key is pressed in
a main window. Also selects choice value.
select():
Called when '<Return>' key is pressed in a main window with 'SELECT'
action selected. Displays submenu, choice selection menu, or just selects
choice value. For non-bool/tristate symbols asks MenuConfig window to
handle value input via one of MenuConfig.ask_for_* methods.
show_help():
Called when '<Return>' key is pressed in a main window with 'HELP' action
selected. Prepares text help and calls MenuConfig.show_text() to display
text window.
"""
# How to display value of BOOL and TRISTATE symbols
TRI_TO_DISPLAY = {
0: ' ',
1: 'M',
2: '*'
}
def __init__(self, mconf, node, indent):
self.indent = indent
self.node = node
self.menuconfig = mconf
self.visible = False
self.text = None
def __str__(self):
return self.text
def _is_visible(self):
node = self.node
v = True
v = v and node.prompt is not None
# It should be enough to check if prompt expression is not false and
# for menu nodes whether 'visible if' is not false
v = v and kconfiglib.expr_value(node.prompt[1]) > 0
if node.item == kconfiglib.MENU:
v = v and kconfiglib.expr_value(node.visibility) > 0
# If node references Symbol, then we also account for symbol visibility
# TODO: need to re-think whether this is needed
if isinstance(node.item, kconfiglib.Symbol):
if node.item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
v = v and len(node.item.assignable) > 0
else:
v = v and node.item.visibility > 0
return v
def _get_text(self):
"""
Compute the textual representation of a menu node (a line in the Listbox)
"""
node = self.node
item = node.item
# Determine prefix
prefix = ' '
if (isinstance(item, kconfiglib.Symbol) and item.choice is None or
isinstance(item, kconfiglib.Choice) and item.type is kconfiglib.TRISTATE):
# The node is for either a symbol outside of choice statement
# or a tristate choice
if item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
value = ListEntry.TRI_TO_DISPLAY[item.tri_value]
if len(item.assignable) > 1:
# Symbol is editable
if 1 in item.assignable:
prefix = '<{}>'.format(value)
else:
prefix = '[{}]'.format(value)
else:
# Symbol is not editable
prefix = '-{}-'.format(value)
else:
prefix = '({})'.format(item.str_value)
elif isinstance(item, kconfiglib.Symbol) and item.choice is not None:
# The node is for symbol inside choice statement
if item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
value = ListEntry.TRI_TO_DISPLAY[item.tri_value]
if len(item.assignable) > 0:
# Symbol is editable
prefix = '({})'.format(value)
else:
# Symbol is not editable
prefix = '-{}-'.format(value)
else:
prefix = '({})'.format(item.str_value)
# Prefix should be at least 3 chars long
if len(prefix) < 3:
prefix += ' ' * (3 - len(prefix))
# Body
body = ''
if node.prompt is not None:
if item is kconfiglib.COMMENT:
body = '*** {} ***'.format(node.prompt[0])
else:
body = node.prompt[0]
# Suffix
is_menu = False
is_new = False
if (item is kconfiglib.MENU
or isinstance(item, kconfiglib.Symbol) and node.is_menuconfig
or isinstance(item, kconfiglib.Choice)):
is_menu = True
if isinstance(item, kconfiglib.Symbol) and item.user_value is None:
is_new = True
# For symbol inside choice that has 'y' value, '(NEW)' is not displayed
if (isinstance(item, kconfiglib.Symbol)
and item.choice and item.choice.tri_value == 2):
is_new = False
# Choice selection - displayed only for choices which have 'y' value
choice_selection = None
if isinstance(item, kconfiglib.Choice) and node.item.str_value == 'y':
choice_selection = ''
if item.selection is not None:
sym = item.selection
if sym.nodes and sym.nodes[0].prompt is not None:
choice_selection = sym.nodes[0].prompt[0]
text = ' {prefix} {indent}{body}{choice}{new}{menu}'.format(
prefix=prefix,
indent=' ' * self.indent,
body=body,
choice='' if choice_selection is None else ' ({})'.format(
choice_selection
),
new=' (NEW)' if is_new else '',
menu=' --->' if is_menu else ''
)
return text
def refresh(self):
self.visible = self._is_visible()
self.text = self._get_text()
def set_tristate_value(self, value):
"""
Call to change the value of BOOL and TRISTATE symbols.
Prefer this over item.set_value() as it handles all necessary
interaction with the MenuConfig window when the symbol value changes.
A None value is accepted but ignored.
"""
item = self.node.item
if (isinstance(item, (kconfiglib.Symbol, kconfiglib.Choice))
and item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE)
and value is not None):
if value in item.assignable:
item.set_value(value)
elif value == 2 and 1 in item.assignable:
print(
'Symbol {} value is limited to \'m\'. Setting value \'m\' instead of \'y\''.format(item.name),
file=sys.stderr
)
item.set_value(1)
self.menuconfig.mark_as_changed()
self.menuconfig.refresh_display()
def set_str_value(self, value):
"""
Call to change the value of HEX, INT and STRING symbols.
Prefer this over item.set_value() as it handles all necessary
interaction with the MenuConfig window when the symbol value changes.
A None value is accepted but ignored.
"""
item = self.node.item
if (isinstance(item, kconfiglib.Symbol)
and item.type in (kconfiglib.INT, kconfiglib.HEX, kconfiglib.STRING)
and value is not None):
item.set_value(value)
self.menuconfig.mark_as_changed()
self.menuconfig.refresh_display()
def toggle(self):
"""
Called when <space> key is pressed
"""
item = self.node.item
if (isinstance(item, (kconfiglib.Symbol, kconfiglib.Choice))
and item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE)):
value = item.tri_value
# Find next value in Symbol/Choice.assignable, or use assignable[0]
try:
it = iter(item.assignable)
while value != next(it):
pass
self.set_tristate_value(next(it))
except StopIteration:
self.set_tristate_value(item.assignable[0])
def select(self):
"""
Called when <Return> key is pressed and SELECT action is selected
"""
item = self.node.item
# - Menu: dive into submenu
# - INT, HEX, STRING symbol: raise prompt to enter symbol value
# - BOOL, TRISTATE symbol inside 'y'-valued Choice: set 'y' value
if (item is kconfiglib.MENU
or isinstance(item, kconfiglib.Symbol) and self.node.is_menuconfig
or isinstance(item, kconfiglib.Choice)):
# Dive into submenu
self.menuconfig.show_submenu(self.node)
elif (isinstance(item, kconfiglib.Symbol) and item.type in
(kconfiglib.INT, kconfiglib.HEX, kconfiglib.STRING)):
# Raise prompt to enter symbol value
ident = self.node.prompt[0] if self.node.prompt is not None else None
title = 'Symbol: {}'.format(item.name)
if item.type is kconfiglib.INT:
# Find enabled ranges
ranges = [
(int(start.str_value), int(end.str_value))
for start, end, expr in item.ranges
if kconfiglib.expr_value(expr) > 0
]
# Raise prompt
self.set_str_value(str(self.menuconfig.ask_for_int(
ident=ident,
title=title,
value=item.str_value,
ranges=ranges
)))
elif item.type is kconfiglib.HEX:
# Find enabled ranges
ranges = [
(int(start.str_value, base=16), int(end.str_value, base=16))
for start, end, expr in item.ranges
if kconfiglib.expr_value(expr) > 0
]
# Raise prompt
self.set_str_value(hex(self.menuconfig.ask_for_hex(
ident=ident,
title=title,
value=item.str_value,
ranges=ranges
)))
elif item.type is kconfiglib.STRING:
# Raise prompt
self.set_str_value(self.menuconfig.ask_for_string(
ident=ident,
title=title,
value=item.str_value
))
elif (isinstance(item, kconfiglib.Symbol)
and item.choice is not None and item.choice.tri_value == 2):
# Symbol inside choice -> set symbol value to 'y'
self.set_tristate_value(2)
def show_help(self):
node = self.node
item = self.node.item
if isinstance(item, (kconfiglib.Symbol, kconfiglib.Choice)):
title = 'Help for symbol: {}'.format(item.name)
            if node.help:
                help_text = node.help
            else:
                help_text = 'There is no help available for this option.\n'
            lines = []
            lines.append(help_text)
lines.append(
'Symbol: {} [={}]'.format(
item.name if item.name else '<UNNAMED>', item.str_value
)
)
lines.append('Type : {}'.format(kconfiglib.TYPE_TO_STR[item.type]))
for n in item.nodes:
lines.append('Prompt: {}'.format(n.prompt[0] if n.prompt else '<EMPTY>'))
lines.append(' Defined at {}:{}'.format(n.filename, n.linenr))
lines.append(' Depends on: {}'.format(kconfiglib.expr_str(n.dep)))
text = '\n'.join(lines)
else:
title = 'Help'
text = 'Help not available for this menu node.\n'
self.menuconfig.show_text(text, title)
self.menuconfig.refresh_display()
class EntryDialog(object):
"""
Creates modal dialog (top-level Tk window) with labels, entry box and two
buttons: OK and CANCEL.
"""
def __init__(self, master, text, title, ident=None, value=None):
self.master = master
dlg = self.dlg = tk.Toplevel(master)
        self.dlg.withdraw()  # hide window until it is fully built
dlg.title(title)
# Identifier label
if ident is not None:
self.label_id = tk.Label(dlg, anchor=tk.W, justify=tk.LEFT)
self.label_id['font'] = font.nametofont('TkFixedFont')
self.label_id['text'] = '# {}'.format(ident)
self.label_id.pack(fill=tk.X, padx=2, pady=2)
# Label
self.label = tk.Label(dlg, anchor=tk.W, justify=tk.LEFT)
self.label['font'] = font.nametofont('TkFixedFont')
self.label['text'] = text
self.label.pack(fill=tk.X, padx=10, pady=4)
# Entry box
self.entry = tk.Entry(dlg)
self.entry['font'] = font.nametofont('TkFixedFont')
self.entry.pack(fill=tk.X, padx=2, pady=2)
# Frame for buttons
self.frame = tk.Frame(dlg)
self.frame.pack(padx=2, pady=2)
# Button
self.btn_accept = tk.Button(self.frame, text='< Ok >', command=self.accept)
self.btn_accept['font'] = font.nametofont('TkFixedFont')
self.btn_accept.pack(side=tk.LEFT, padx=2)
self.btn_cancel = tk.Button(self.frame, text='< Cancel >', command=self.cancel)
self.btn_cancel['font'] = font.nametofont('TkFixedFont')
self.btn_cancel.pack(side=tk.LEFT, padx=2)
# Bind Enter and Esc keys
self.dlg.bind('<Return>', self.accept)
self.dlg.bind('<Escape>', self.cancel)
# Dialog is resizable only by width
self.dlg.resizable(1, 0)
# Set supplied value (if any)
if value is not None:
self.entry.insert(0, value)
self.entry.selection_range(0, tk.END)
# By default returned value is None. To caller this means that entry
# process was cancelled
self.value = None
# Modal dialog
dlg.transient(master)
dlg.grab_set()
# Center dialog window
_center_window_above_parent(master, dlg)
self.dlg.deiconify() # show window
# Focus entry field
self.entry.focus_set()
def accept(self, ev=None):
self.value = self.entry.get()
self.dlg.destroy()
def cancel(self, ev=None):
self.dlg.destroy()
class TextDialog(object):
def __init__(self, master, text, title):
self.master = master
dlg = self.dlg = tk.Toplevel(master)
        self.dlg.withdraw()  # hide window until it is fully built
dlg.title(title)
dlg.minsize(600,400)
# Text
self.text = tk.Text(dlg, height=1)
self.text['font'] = font.nametofont('TkFixedFont')
self.text.insert(tk.END, text)
# Make text read-only
self.text['state'] = tk.DISABLED
self.text.pack(fill=tk.BOTH, expand=1, padx=4, pady=4)
# Frame for buttons
self.frame = tk.Frame(dlg)
self.frame.pack(padx=2, pady=2)
# Button
self.btn_accept = tk.Button(self.frame, text='< Ok >', command=self.accept)
self.btn_accept['font'] = font.nametofont('TkFixedFont')
self.btn_accept.pack(side=tk.LEFT, padx=2)
# Bind Enter and Esc keys
self.dlg.bind('<Return>', self.accept)
self.dlg.bind('<Escape>', self.cancel)
# Modal dialog
dlg.transient(master)
dlg.grab_set()
# Center dialog window
_center_window_above_parent(master, dlg)
self.dlg.deiconify() # show window
# Focus entry field
self.text.focus_set()
def accept(self, ev=None):
self.dlg.destroy()
def cancel(self, ev=None):
self.dlg.destroy()
class MenuConfig(object):
(
ACTION_SELECT,
ACTION_EXIT,
ACTION_HELP,
ACTION_LOAD,
ACTION_SAVE,
ACTION_SAVE_AS
) = range(6)
ACTIONS = (
('Select', ACTION_SELECT),
('Exit', ACTION_EXIT),
('Help', ACTION_HELP),
('Load', ACTION_LOAD),
('Save', ACTION_SAVE),
('Save as', ACTION_SAVE_AS),
)
def __init__(self, kconfig):
self.kconfig = kconfig
# Instantiate Tk widgets
self.root = tk.Tk()
        self.root.withdraw()  # hide window until it is fully built
dlg = self.root
# Window title
dlg.title('pymenuconfig')
# Some empirical window size
dlg.minsize(500, 300)
dlg.geometry('800x600')
# Label that shows position in menu tree
self.label_position = tk.Label(
dlg,
anchor=tk.W,
justify=tk.LEFT,
font=font.nametofont('TkFixedFont')
)
self.label_position.pack(fill=tk.X, padx=2)
# 'Tip' frame and text
self.frame_tip = tk.LabelFrame(
dlg,
text='Tip'
)
self.label_tip = tk.Label(
self.frame_tip,
anchor=tk.W,
justify=tk.LEFT,
font=font.nametofont('TkFixedFont')
)
self.label_tip['text'] = '\n'.join([
'Arrow keys navigate the menu. <Enter> performs selected operation (set of buttons at the bottom)',
'Pressing <Y> includes, <N> excludes, <M> modularizes features',
'Press <Esc> to go one level up. Press <Esc> at top level to exit',
'Legend: [*] built-in [ ] excluded <M> module < > module capable'
])
self.label_tip.pack(fill=tk.BOTH, expand=1, padx=4, pady=4)
self.frame_tip.pack(fill=tk.X, padx=2)
# Main ListBox where all the magic happens
self.list = tk.Listbox(
dlg,
selectmode=tk.SINGLE,
activestyle=tk.NONE,
font=font.nametofont('TkFixedFont'),
height=1,
)
self.list['foreground'] = 'Blue'
self.list['background'] = 'Gray95'
# Make selection invisible
self.list['selectbackground'] = self.list['background']
self.list['selectforeground'] = self.list['foreground']
self.list.pack(fill=tk.BOTH, expand=1, padx=20, ipadx=2)
# Frame with radio buttons
self.frame_radio = tk.Frame(dlg)
self.radio_buttons = []
self.tk_selected_action = tk.IntVar()
for text, value in MenuConfig.ACTIONS:
btn = tk.Radiobutton(
self.frame_radio,
variable=self.tk_selected_action,
value=value
)
btn['text'] = '< {} >'.format(text)
btn['font'] = font.nametofont('TkFixedFont')
btn['indicatoron'] = 0
btn.pack(side=tk.LEFT)
self.radio_buttons.append(btn)
self.frame_radio.pack(anchor=tk.CENTER, pady=4)
# Label with status information
self.tk_status = tk.StringVar()
self.label_status = tk.Label(
dlg,
textvariable=self.tk_status,
anchor=tk.W,
justify=tk.LEFT,
font=font.nametofont('TkFixedFont')
)
self.label_status.pack(fill=tk.X, padx=4, pady=4)
# Center window
_center_window(self.root, dlg)
self.root.deiconify() # show window
# Disable keyboard focus on all widgets ...
self._set_option_to_all_children(dlg, 'takefocus', 0)
# ... except for main ListBox
self.list['takefocus'] = 1
self.list.focus_set()
# Bind keys
dlg.bind('<Escape>', self.handle_keypress)
dlg.bind('<space>', self.handle_keypress)
dlg.bind('<Return>', self.handle_keypress)
dlg.bind('<Right>', self.handle_keypress)
dlg.bind('<Left>', self.handle_keypress)
dlg.bind('<Up>', self.handle_keypress)
dlg.bind('<Down>', self.handle_keypress)
dlg.bind('n', self.handle_keypress)
dlg.bind('m', self.handle_keypress)
dlg.bind('y', self.handle_keypress)
# Register callback that's called when window closes
dlg.wm_protocol('WM_DELETE_WINDOW', self._close_window)
# Init fields
self.node = None
self.node_stack = []
self.all_entries = []
self.shown_entries = []
self.config_path = None
self.unsaved_changes = False
self.status_string = 'NEW CONFIG'
self.update_status()
# Display first child of top level node (the top level node is 'mainmenu')
self.show_node(self.kconfig.top_node)
def _set_option_to_all_children(self, widget, option, value):
widget[option] = value
        for child in widget.children.values():
            self._set_option_to_all_children(child, option, value)
def _invert_colors(self, idx):
self.list.itemconfig(idx, {'bg' : self.list['foreground']})
self.list.itemconfig(idx, {'fg' : self.list['background']})
@property
def _selected_entry(self):
# type: (...) -> ListEntry
active_idx = self.list.index(tk.ACTIVE)
        if 0 <= active_idx < len(self.shown_entries):
return self.shown_entries[active_idx]
return None
def _select_node(self, node):
# type: (kconfiglib.MenuNode) -> None
"""
Attempts to select entry that corresponds to given MenuNode in main listbox
"""
idx = None
for i, e in enumerate(self.shown_entries):
if e.node is node:
idx = i
break
if idx is not None:
self.list.activate(idx)
self.list.see(idx)
self._invert_colors(idx)
def handle_keypress(self, ev):
keysym = ev.keysym
if keysym == 'Left':
self._select_action(prev=True)
elif keysym == 'Right':
self._select_action(prev=False)
elif keysym == 'Up':
self.refresh_display(reset_selection=False)
elif keysym == 'Down':
self.refresh_display(reset_selection=False)
elif keysym == 'space':
self._selected_entry.toggle()
elif keysym in ('n', 'm', 'y'):
self._selected_entry.set_tristate_value(kconfiglib.STR_TO_TRI[keysym])
elif keysym == 'Return':
action = self.tk_selected_action.get()
if action == self.ACTION_SELECT:
self._selected_entry.select()
elif action == self.ACTION_EXIT:
self._action_exit()
elif action == self.ACTION_HELP:
self._selected_entry.show_help()
elif action == self.ACTION_LOAD:
if self.prevent_losing_changes():
self.open_config()
elif action == self.ACTION_SAVE:
self.save_config()
elif action == self.ACTION_SAVE_AS:
self.save_config(force_file_dialog=True)
elif keysym == 'Escape':
self._action_exit()
def _close_window(self):
if self.prevent_losing_changes():
print('Exiting..')
self.root.destroy()
def _action_exit(self):
if self.node_stack:
self.show_parent()
else:
self._close_window()
def _select_action(self, prev=False):
# Determine the radio button that's activated
action = self.tk_selected_action.get()
if prev:
action -= 1
else:
action += 1
action %= len(MenuConfig.ACTIONS)
self.tk_selected_action.set(action)
def _collect_list_entries(self, start_node, indent=0):
"""
        Given the first MenuNode of the node list at some level of the menu
        hierarchy, collects the nodes that may be displayed when viewing and
        editing that hierarchy level. Includes implicit menu nodes, i.e. the
        ones that depend on a 'config' entry via an 'if' statement and are
        internally represented as children of their dependency
"""
entries = []
n = start_node
while n is not None:
entries.append(ListEntry(self, n, indent))
# If node refers to a symbol (X) and has children, it is either
# 'config' or 'menuconfig'. The children are items inside 'if X'
# block that immediately follows 'config' or 'menuconfig' entry.
# If it's a 'menuconfig' then corresponding MenuNode is shown as a
# regular menu entry. But if it's a 'config', then its children need
# to be shown in the same list with their texts indented
if (n.list is not None
and isinstance(n.item, kconfiglib.Symbol)
                and not n.is_menuconfig):
entries.extend(
self._collect_list_entries(n.list, indent=indent + 1)
)
n = n.next
return entries
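    # Illustrative sketch (not from the original source): the kind of Kconfig
    # fragment that _collect_list_entries() flattens. The symbol names FOO and
    # BAR are assumed placeholders. Because BAR sits in an 'if FOO' block right
    # after 'config FOO', kconfiglib represents its MenuNode as a child of FOO,
    # and this method shows it in the same list, indented one level deeper:
    #
    #   config FOO
    #       bool "Enable foo"
    #
    #   if FOO
    #   config BAR
    #       bool "Enable bar"
    #   endif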
def refresh_display(self, reset_selection=False):
# Refresh list entries' attributes
for e in self.all_entries:
e.refresh()
# Try to preserve selection upon refresh
selected_entry = self._selected_entry
# Also try to preserve listbox scroll offset
        # If not preserved, the see() method would make the wanted item appear
        # at the bottom of the list, even if it was previously in the center
scroll_offset = self.list.yview()[0]
# Show only visible entries
self.shown_entries = [e for e in self.all_entries if e.visible]
# Refresh listbox contents
self.list.delete(0, tk.END)
self.list.insert(0, *self.shown_entries)
if selected_entry and not reset_selection:
# Restore scroll position
self.list.yview_moveto(scroll_offset)
# Activate previously selected node
self._select_node(selected_entry.node)
else:
# Select the topmost entry
self.list.activate(0)
self._invert_colors(0)
# Select ACTION_SELECT on each refresh (mimic C menuconfig)
self.tk_selected_action.set(self.ACTION_SELECT)
# Display current location in configuration tree
pos = []
for n in self.node_stack + [self.node]:
pos.append(n.prompt[0] if n.prompt else '[none]')
self.label_position['text'] = u'# ' + u' -> '.join(pos)
def show_node(self, node):
self.node = node
if node.list is not None:
self.all_entries = self._collect_list_entries(node.list)
else:
self.all_entries = []
self.refresh_display(reset_selection=True)
def show_submenu(self, node):
self.node_stack.append(self.node)
self.show_node(node)
def show_parent(self):
if self.node_stack:
select_node = self.node
parent_node = self.node_stack.pop()
self.show_node(parent_node)
# Restore previous selection
self._select_node(select_node)
self.refresh_display(reset_selection=False)
def ask_for_string(self, ident=None, title='Enter string', value=None):
"""
Raises dialog with text entry widget and asks user to enter string
Return:
- str - user entered string
- None - entry was cancelled
"""
text = 'Please enter a string value\n' \
               'Use <Enter> key to accept the value\n' \
'Use <Esc> key to cancel entry\n'
d = EntryDialog(self.root, text, title, ident=ident, value=value)
self.root.wait_window(d.dlg)
self.list.focus_set()
return d.value
def ask_for_int(self, ident=None, title='Enter integer value', value=None, ranges=()):
"""
Raises dialog with text entry widget and asks user to enter decimal number
Ranges should be iterable of tuples (start, end),
where 'start' and 'end' specify allowed value range (inclusively)
Return:
- int - when valid number that falls within any one of specified ranges is entered
- None - invalid number or entry was cancelled
"""
text = 'Please enter a decimal value. Fractions will not be accepted\n' \
               'Use <Enter> key to accept the value\n' \
'Use <Esc> key to cancel entry\n'
d = EntryDialog(self.root, text, title, ident=ident, value=value)
self.root.wait_window(d.dlg)
self.list.focus_set()
ivalue = None
if d.value:
try:
ivalue = int(d.value)
except ValueError:
messagebox.showerror('Bad value', 'Entered value \'{}\' is not an integer'.format(d.value))
if ivalue is not None and ranges:
allowed = False
for start, end in ranges:
                allowed = allowed or start <= ivalue <= end
if not allowed:
messagebox.showerror(
'Bad value',
'Entered value \'{:d}\' is out of range\n'
'Allowed:\n{}'.format(
ivalue,
'\n'.join([' {:d} - {:d}'.format(s,e) for s,e in ranges])
)
)
ivalue = None
return ivalue
def ask_for_hex(self, ident=None, title='Enter hexadecimal value', value=None, ranges=()):
"""
        Raises dialog with text entry widget and asks user to enter a hexadecimal number
Ranges should be iterable of tuples (start, end),
where 'start' and 'end' specify allowed value range (inclusively)
Return:
- int - when valid number that falls within any one of specified ranges is entered
- None - invalid number or entry was cancelled
"""
text = 'Please enter a hexadecimal value\n' \
               'Use <Enter> key to accept the value\n' \
'Use <Esc> key to cancel entry\n'
d = EntryDialog(self.root, text, title, ident=ident, value=value)
self.root.wait_window(d.dlg)
self.list.focus_set()
hvalue = None
if d.value:
try:
hvalue = int(d.value, base=16)
except ValueError:
messagebox.showerror('Bad value', 'Entered value \'{}\' is not a hexadecimal value'.format(d.value))
if hvalue is not None and ranges:
allowed = False
for start, end in ranges:
                allowed = allowed or start <= hvalue <= end
if not allowed:
messagebox.showerror(
'Bad value',
'Entered value \'0x{:x}\' is out of range\n'
'Allowed:\n{}'.format(
hvalue,
'\n'.join([' 0x{:x} - 0x{:x}'.format(s,e) for s,e in ranges])
)
)
hvalue = None
return hvalue
def show_text(self, text, title='Info'):
"""
Raises dialog with read-only text view that contains supplied text
"""
d = TextDialog(self.root, text, title)
self.root.wait_window(d.dlg)
self.list.focus_set()
def mark_as_changed(self):
"""
Marks current config as having unsaved changes
Should be called whenever config value is changed
"""
self.unsaved_changes = True
self.update_status()
def set_status_string(self, status):
"""
Sets status string displayed at the bottom of the window
"""
self.status_string = status
self.update_status()
def update_status(self):
"""
Updates status bar display
Status bar displays:
- unsaved status
- current config path
- status string (see set_status_string())
"""
self.tk_status.set('{} [{}] {}'.format(
'<UNSAVED>' if self.unsaved_changes else '',
self.config_path if self.config_path else '',
self.status_string
))
def _check_is_visible(self, node):
v = True
v = v and node.prompt is not None
# It should be enough to check if prompt expression is not false and
# for menu nodes whether 'visible if' is not false
v = v and kconfiglib.expr_value(node.prompt[1]) > 0
if node.item == kconfiglib.MENU:
v = v and kconfiglib.expr_value(node.visibility) > 0
# If node references Symbol, then we also account for symbol visibility
# TODO: need to re-think whether this is needed
if isinstance(node.item, kconfiglib.Symbol):
if node.item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
v = v and len(node.item.assignable) > 0
else:
v = v and node.item.visibility > 0
return v
def config_is_changed(self):
is_changed = False
node = self.kconfig.top_node.list
if not node:
# Empty configuration
return is_changed
while 1:
item = node.item
if isinstance(item, kconfiglib.Symbol) and item.user_value is None and self._check_is_visible(node):
is_changed = True
print("Config \"# {}\" has changed, need save config file\n".format(node.prompt[0]))
break;
# Iterative tree walk using parent pointers
if node.list:
node = node.list
elif node.next:
node = node.next
else:
while node.parent:
node = node.parent
if node.next:
node = node.next
break
else:
break
return is_changed
def prevent_losing_changes(self):
"""
Checks if there are unsaved changes and asks user to save or discard them
This routine should be called whenever current config is going to be discarded
Raises the usual 'Yes', 'No', 'Cancel' prompt.
Return:
- True: caller may safely drop current config state
- False: user needs to continue work on current config ('Cancel' pressed or saving failed)
"""
        if self.config_is_changed():
self.mark_as_changed()
if not self.unsaved_changes:
return True
res = messagebox.askyesnocancel(
parent=self.root,
title='Unsaved changes',
message='Config has unsaved changes. Do you want to save them?'
)
if res is None:
return False
elif res is False:
return True
# Otherwise attempt to save config and succeed only if config has been saved successfully
saved = self.save_config()
return saved
def open_config(self, path=None):
if path is None:
# Create open dialog. Either existing file is selected or no file is selected as a result
path = filedialog.askopenfilename(
parent=self.root,
title='Open config..',
initialdir=os.path.dirname(self.config_path) if self.config_path else os.getcwd(),
filetypes=(('.config files', '*.config'), ('All files', '*.*'))
)
if not path or not os.path.isfile(path):
return False
path = os.path.abspath(path)
print('Loading config: \'{}\''.format(path))
# Try to open given path
# If path does not exist, we still set current config path to it but don't load anything
self.unsaved_changes = False
self.config_path = path
if not os.path.exists(path):
self.set_status_string('New config')
self.mark_as_changed()
return True
# Load config and set status accordingly
try:
self.kconfig.load_config(path)
except IOError as e:
self.set_status_string('Failed to load: \'{}\''.format(path))
self.refresh_display()
print('Failed to load config \'{}\': {}'.format(path, e))
return False
self.set_status_string('Opened config')
self.refresh_display()
return True
def save_config(self, force_file_dialog=False):
path = self.config_path
if path is None or force_file_dialog:
path = filedialog.asksaveasfilename(
parent=self.root,
title='Save config as..',
initialdir=os.path.dirname(self.config_path) if self.config_path else os.getcwd(),
initialfile=os.path.basename(self.config_path) if self.config_path else None,
defaultextension='.config',
filetypes=(('.config files', '*.config'), ('All files', '*.*'))
)
if not path:
return False
path = os.path.abspath(path)
print('Saving config: \'{}\''.format(path))
# Try to save config to selected path
try:
self.kconfig.write_config(path, header="#\n# Automatically generated file; DO NOT EDIT.\n")
self.unsaved_changes = False
self.config_path = path
self.set_status_string('Saved config')
except IOError as e:
self.set_status_string('Failed to save: \'{}\''.format(path))
print('Save failed: {}'.format(e), file=sys.stderr)
return False
return True
def _center_window(root, window):
# type: (tk.Tk, tk.Toplevel) -> None
"""
Attempts to center window on screen
"""
root.update_idletasks()
# root.eval('tk::PlaceWindow {!s} center'.format(
# window.winfo_pathname(window.winfo_id())
# ))
w = window.winfo_width()
h = window.winfo_height()
ws = window.winfo_screenwidth()
hs = window.winfo_screenheight()
x = (ws / 2) - (w / 2)
y = (hs / 2) - (h / 2)
window.geometry('+{:d}+{:d}'.format(int(x), int(y)))
window.lift()
window.focus_force()
def _center_window_above_parent(root, window):
# type: (tk.Tk, tk.Toplevel) -> None
"""
Attempts to center window above its parent window
"""
# root.eval('tk::PlaceWindow {!s} center'.format(
# window.winfo_pathname(window.winfo_id())
# ))
root.update_idletasks()
parent = window.master
w = window.winfo_width()
h = window.winfo_height()
px = parent.winfo_rootx()
py = parent.winfo_rooty()
pw = parent.winfo_width()
ph = parent.winfo_height()
x = px + (pw / 2) - (w / 2)
y = py + (ph / 2) - (h / 2)
window.geometry('+{:d}+{:d}'.format(int(x), int(y)))
window.lift()
window.focus_force()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
# Instantiate cmd options parser
parser = argparse.ArgumentParser(
description='Interactive Kconfig configuration editor'
)
parser.add_argument(
'--kconfig',
metavar='FILE',
type=str,
default='Kconfig',
help='path to root Kconfig file'
)
parser.add_argument(
'--config',
metavar='FILE',
type=str,
help='path to .config file to load'
)
args = parser.parse_args(argv)
kconfig_path = args.kconfig
config_path = args.config
# Verify that Kconfig file exists
if not os.path.isfile(kconfig_path):
raise RuntimeError('\'{}\': no such file'.format(kconfig_path))
# Parse Kconfig files
kconf = kconfiglib.Kconfig(filename=kconfig_path)
mc = MenuConfig(kconf)
# If config file was specified, load it
if config_path:
mc.open_config(config_path)
tk.mainloop()
if __name__ == '__main__':
main()
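# Illustrative invocation (not from the original source; the script and file
# names below are assumed placeholders):
#
#   python pymenuconfig.py --kconfig Kconfig --config build/.config
#
# This parses the given Kconfig tree, loads the existing config file if one is
# supplied, and opens the Tk-based menuconfig window driven by main() above.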
|
|
# Copyright (c) 2016, Matt Layman
import inspect
import os
import tempfile
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from tap.i18n import _
from tap.tests import TestCase
from tap.tracker import Tracker
class TestTracker(TestCase):
def _make_header(self, test_case):
return _('# TAP results for {test_case}').format(test_case=test_case)
def test_has_test_cases(self):
tracker = Tracker()
self.assertEqual(tracker._test_cases, {})
def test_tracks_class(self):
tracker = Tracker()
tracker._track('FakeTestClass')
self.assertEqual(tracker._test_cases.get('FakeTestClass'), [])
def test_adds_ok(self):
tracker = Tracker()
tracker.add_ok('FakeTestCase', 'a description')
line = tracker._test_cases['FakeTestCase'][0]
self.assertTrue(line.ok)
self.assertEqual(line.description, 'a description')
def test_adds_not_ok(self):
tracker = Tracker()
tracker.add_not_ok('FakeTestCase', 'a description')
line = tracker._test_cases['FakeTestCase'][0]
self.assertFalse(line.ok)
self.assertEqual(line.description, 'a description')
def test_adds_skip(self):
tracker = Tracker()
tracker.add_skip('FakeTestCase', 'a description', 'a reason')
line = tracker._test_cases['FakeTestCase'][0]
self.assertTrue(line.ok)
self.assertEqual(line.description, 'a description')
self.assertEqual(line.directive.text, 'SKIP a reason')
def test_generates_tap_reports_in_new_outdir(self):
tempdir = tempfile.mkdtemp()
outdir = os.path.join(tempdir, 'non', 'existent', 'path')
tracker = Tracker(outdir=outdir)
tracker.add_ok('FakeTestCase', 'I should be in the specified dir.')
tracker.generate_tap_reports()
tap_file = os.path.join(outdir, 'FakeTestCase.tap')
self.assertTrue(os.path.exists(tap_file))
def test_generates_tap_reports_in_existing_outdir(self):
outdir = tempfile.mkdtemp()
tracker = Tracker(outdir=outdir)
tracker.add_ok('FakeTestCase', 'I should be in the specified dir.')
tracker.generate_tap_reports()
tap_file = os.path.join(outdir, 'FakeTestCase.tap')
self.assertTrue(os.path.exists(tap_file))
def test_results_not_combined_by_default(self):
tracker = Tracker()
self.assertFalse(tracker.combined)
def test_individual_report_has_no_plan_when_combined(self):
outdir = tempfile.mkdtemp()
tracker = Tracker(outdir=outdir, combined=True)
tracker.add_ok('FakeTestCase', 'Look ma, no plan!')
out_file = StringIO()
tracker.generate_tap_report(
'FakeTestCase', tracker._test_cases['FakeTestCase'], out_file)
report = out_file.getvalue()
self.assertTrue('Look ma' in report)
self.assertFalse('1..' in report)
def test_combined_results_in_one_file(self):
outdir = tempfile.mkdtemp()
tracker = Tracker(outdir=outdir, combined=True)
tracker.add_ok('FakeTestCase', 'YESSS!')
tracker.add_ok('DifferentFakeTestCase', 'GOAAL!')
tracker.generate_tap_reports()
self.assertFalse(
os.path.exists(os.path.join(outdir, 'FakeTestCase.tap')))
self.assertFalse(
os.path.exists(os.path.join(outdir, 'DifferentFakeTestCase.tap')))
with open(os.path.join(outdir, 'testresults.tap'), 'r') as f:
report = f.read()
expected = inspect.cleandoc(
"""{header_1}
ok 1 - YESSS!
{header_2}
ok 2 - GOAAL!
1..2
""".format(
header_1=self._make_header('FakeTestCase'),
header_2=self._make_header('DifferentFakeTestCase')))
self.assertEqual(report.strip(), expected)
def test_tracker_does_not_stream_by_default(self):
tracker = Tracker()
self.assertFalse(tracker.streaming)
def test_tracker_has_stream(self):
tracker = Tracker()
self.assertTrue(tracker.stream is None)
def test_add_ok_writes_to_stream_while_streaming(self):
stream = StringIO()
tracker = Tracker(streaming=True, stream=stream)
tracker.add_ok('FakeTestCase', 'YESSS!')
tracker.add_ok('AnotherTestCase', 'Sure.')
expected = inspect.cleandoc(
"""{header_1}
ok 1 - YESSS!
{header_2}
ok 2 - Sure.
""".format(
header_1=self._make_header('FakeTestCase'),
header_2=self._make_header('AnotherTestCase')))
self.assertEqual(stream.getvalue().strip(), expected)
def test_add_not_ok_writes_to_stream_while_streaming(self):
stream = StringIO()
tracker = Tracker(streaming=True, stream=stream)
tracker.add_not_ok('FakeTestCase', 'YESSS!')
expected = inspect.cleandoc(
"""{header}
not ok 1 - YESSS!
""".format(
header=self._make_header('FakeTestCase')))
self.assertEqual(stream.getvalue().strip(), expected)
def test_add_skip_writes_to_stream_while_streaming(self):
stream = StringIO()
tracker = Tracker(streaming=True, stream=stream)
tracker.add_skip('FakeTestCase', 'YESSS!', 'a reason')
expected = inspect.cleandoc(
"""{header}
ok 1 - YESSS! # SKIP a reason
""".format(
header=self._make_header('FakeTestCase')))
self.assertEqual(stream.getvalue().strip(), expected)
def test_streaming_does_not_write_files(self):
outdir = tempfile.mkdtemp()
stream = StringIO()
tracker = Tracker(outdir=outdir, streaming=True, stream=stream)
tracker.add_ok('FakeTestCase', 'YESSS!')
tracker.generate_tap_reports()
self.assertFalse(
os.path.exists(os.path.join(outdir, 'FakeTestCase.tap')))
def test_streaming_writes_plan(self):
stream = StringIO()
tracker = Tracker(streaming=True, stream=stream)
tracker.combined_line_number = 42
tracker.generate_tap_reports()
self.assertEqual(stream.getvalue(), '1..42\n')
def test_get_default_tap_file_path(self):
tracker = Tracker()
file_path = tracker._get_tap_file_path('foo')
self.assertEqual('foo.tap', file_path)
def test_sanitizes_tap_file_path(self):
tracker = Tracker()
file_path = tracker._get_tap_file_path('an awful \\ testcase / name\n')
self.assertEqual('an-awful---testcase---name-.tap', file_path)
def test_adds_not_ok_with_diagnostics(self):
tracker = Tracker()
tracker.add_not_ok(
'FakeTestCase', 'a description', diagnostics='# more info\n')
line = tracker._test_cases['FakeTestCase'][0]
self.assertEqual('# more info\n', line.diagnostics)
def test_header_displayed_by_default(self):
tracker = Tracker()
self.assertTrue(tracker.header)
def test_header_set_by_init(self):
tracker = Tracker(header=False)
self.assertFalse(tracker.header)
def test_does_not_write_header(self):
stream = StringIO()
tracker = Tracker(streaming=True, stream=stream, header=False)
tracker.add_skip('FakeTestCase', 'YESSS!', 'a reason')
expected = inspect.cleandoc(
"""ok 1 - YESSS! # SKIP a reason""")
self.assertEqual(stream.getvalue().strip(), expected)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatic quantization toolkit."""
import tvm.ir
import tvm
from tvm.runtime import Object
from . import _quantize
from ._calibrate import calibrate
from ._partition_conversions import partition_conversions
from .. import expr as _expr
from .. import transform as _transform
class QAnnotateKind(object):
"""Denote the kind of annotation field, corresponding
to different nbit configure."""
IDENTITY = 0
INPUT = 1
WEIGHT = 2
ACTIVATION = 3
def kind2str(kind):
"""Convert a `QAnnotateKind` to string"""
str_map = {
QAnnotateKind.INPUT: "input",
QAnnotateKind.WEIGHT: "weight",
QAnnotateKind.ACTIVATION: "activation",
QAnnotateKind.IDENTITY: "identity",
}
assert kind in str_map
return str_map[kind]
def _forward_op(ref_call, args):
"""forward the operator of ref_call with provided arguments"""
return _expr.Call(ref_call.op, args, ref_call.attrs, ref_call.type_args)
@tvm._ffi.register_object("relay.quantize.QConfig")
class QConfig(Object):
"""Configure the quantization behavior by setting config variables.
Note
----
This object is backed by node system in C++, with arguments that can be
exchanged between python and C++.
Do not construct directly, use qconfig instead.
The fields that are backed by the C++ node are immutable once an instance
is constructed. See _node_defaults for the fields.
"""
_node_defaults = {
"nbit_input": 8,
"nbit_weight": 8,
"nbit_activation": 32,
"dtype_input": "int8",
"dtype_weight": "int8",
"dtype_activation": "int32",
"calibrate_mode": "global_scale",
"global_scale": 8.0,
"weight_scale": "power2",
"skip_dense_layer": True,
"skip_conv_layers": [0],
"do_simulation": False,
"round_for_shift": True,
"debug_enabled_ops": None,
"rounding": "UPWARD",
"calibrate_chunk_by": -1,
"partition_conversions": "disabled",
}
# pylint: disable=no-member
def __init__(self, handle):
"""Initialize the function with handle
Parameters
----------
handle : SymbolHandle
the handle to the underlying C++ Symbol
"""
super(QConfig, self).__init__(handle)
self.handle = handle
def guard(self, ref_call):
"""Return true if op is enabled, otherwise return false"""
op_name = ref_call.op.name
if self.debug_enabled_ops is not None:
name_list = [x.value for x in self.debug_enabled_ops]
if op_name not in name_list:
return False
return True
def get_nbit_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, "nbit_" + name)
def get_dtype_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, "dtype_" + name)
def __enter__(self):
# pylint: disable=protected-access
_quantize._EnterQConfigScope(self)
return self
def __exit__(self, ptype, value, trace):
_quantize._ExitQConfigScope()
def __setattr__(self, name, value):
if name in QConfig._node_defaults:
raise AttributeError("'%s' object cannot set attribute '%s'" % (str(type(self)), name))
return super(QConfig, self).__setattr__(name, value)
def current_qconfig():
"""Get the current quantization configuration."""
return _quantize._GetCurrentQConfig()
def qconfig(**kwargs):
"""Configure the quantization behavior by setting config variables.
Parameters
    ----------
nbit_dict: dict of QAnnotateKind -> int
        Number of bits for every kind of annotate field.
calibrate_mode: str
The calibration mode. 'global_scale' or 'kl_divergence'.
global_scale: use global scale
kl_divergence: find scales by kl divergence on the dataset.
global_scale: float
The global scale for calibration.
weight_scale: str
The way to calculate scales for weights (annotated with QAnnotateKind.WEIGHT).
power2: Find the maximum of the absolute value of the tensor, and then round up to power
of two.
max: Find the maximum of the absolute value of the tensor
skip_dense_layer: boolean
        Whether to skip all nn.dense layers. They are skipped by default.
skip_conv_layers: list
        Specifies which layers to skip. Provide a list of indices
        that indicate which conv2d layers to leave untouched, starting from 0.
do_simulation: boolean
Whether to do simulation with float operation only.
round_for_shift: boolean
Whether to add bias for rounding during shift.
debug_enabled_ops: None or list of str
        Partially quantize specified operators for debugging. The default value
        is None, which means it will try to call all operators' annotate rewrite
        functions.
rounding: "UPWARD" or "TONEAREST"
Rounding direction for fixed point multiplications.
partition_conversions: 'disabled', 'enabled', or 'fully_integral'
If set to 'enabled' or 'fully_integral', partitions a quantized
result into a module containing
a prefix function (consisting of input conversion into the quantized data space),
a middle function (consisting of the core quantized network),
a suffix function (consisting of output dequantization),
and a main function (that calls the prefix, middle, and suffix functions in succession).
If set to 'fully_integral' and there are unquantized operators in the result,
an exception is raised.
The default value is 'disabled'.
Returns
-------
config: QConfig
The quantization configuration
"""
node_args = {k: v if k not in kwargs else kwargs[k] for k, v in QConfig._node_defaults.items()}
return tvm.ir.make_node("relay.quantize.QConfig", **node_args)
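def _example_qconfig_scope():
    """Illustrative sketch only, not part of the original module: qconfig() is
    meant to be used as a context manager, with keyword arguments overriding
    the defaults in QConfig._node_defaults for the duration of the scope. The
    particular field values below are arbitrary examples."""
    with qconfig(nbit_input=8, global_scale=8.0, skip_conv_layers=[0]):
        # Passes running inside the scope read the active configuration
        # through current_qconfig().
        cfg = current_qconfig()
        print(cfg.nbit_input, cfg.global_scale)
    # Leaving the scope restores the previously active configuration.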
class QuantizeContext(object):
"""An internal used global context object for annotation,
for putting some state variables like `conv2d_counter`."""
Current = None
def __init__(self):
self.qnode_map = dict()
self._conv2d_counter = 0
self._stop_quantize = False
def check_to_skip(self, ref_call):
"""Check the index of conv2d layer to decide whether to
skip the current operator."""
if self._stop_quantize:
return True
if current_qconfig().skip_conv_layers is not None:
# check skip conv layers
skipped_indices = [int(x) for x in current_qconfig().skip_conv_layers]
if self._conv2d_counter in skipped_indices:
if ref_call.op.name == "nn.conv2d":
self._conv2d_counter += 1
return True
if ref_call.op.name == "nn.conv2d":
self._conv2d_counter += 1
return False
def stop_quantize(self):
self._stop_quantize = True
def reset(self):
self._conv2d_counter = 0
self._stop_quantize = False
def __enter__(self):
self.reset()
return self
def __exit__(self, ptype, value, traceback):
pass
def quantize_context():
"""Get the global singleton scope"""
if QuantizeContext.Current is None:
QuantizeContext.Current = QuantizeContext()
return QuantizeContext.Current
def partition():
"""Partition graph into small low-precision sections by `cast_hint` and
`stop_fusion`.
Returns
-------
ret: tvm.transform.Pass
        The registered pass for quantization partitioning.
"""
return _quantize.QuantizePartition()
def annotate():
"""Given a float32 graph, this pass will rewrite the graph and return
a graph which simulates the error brought by the current quantization
scheme.
Returns
-------
ret: tvm.transform.Pass
The registered pass for quantization annotation.
"""
return _quantize.QuantizeAnnotate()
def realize():
"""The realize pass will transform the simulated quantized graph, which
actually computes with float32, to a real low-bit integer graph. It will
replace the `simulated_quantize` with several fine-grained operators like
add, multiply, and shift as much as possible for better performance.
Returns
-------
ret: tvm.transform.Pass
The registered pass for quantization realization.
"""
return _quantize.QuantizeRealize()
def _bind_params(func, params):
"""Bind the params to the expression."""
name_dict = {}
for arg in func.params:
name = arg.name_hint
if name in name_dict:
name_dict[name] = None
else:
name_dict[name] = arg
bind_dict = {}
for k, v in params.items():
if k not in name_dict:
continue
arg = name_dict[k]
if arg is None:
raise ValueError("Multiple args in the function have name %s" % k)
bind_dict[arg] = _expr.const(v)
return _expr.bind(func, bind_dict)
def prerequisite_optimize(mod, params=None):
"""Prerequisite optimization passes for quantization. Perform
"SimplifyInference", "FoldScaleAxis", "FoldConstant", and
"CanonicalizeOps" optimization before quantization."""
optimize = tvm.transform.Sequential(
[
_transform.SimplifyInference(),
_transform.FoldConstant(),
_transform.FoldScaleAxis(),
_transform.CanonicalizeOps(),
_transform.FoldConstant(),
]
)
if params:
mod["main"] = _bind_params(mod["main"], params)
mod = optimize(mod)
return mod
def quantize(mod, params=None, dataset=None):
"""The quantization procedure. Before running the three main
procedure of quantization, "annotate", "calibrate" and "realize"
, we need to do "SimplifyInference", "FoldScaleAxis", "FoldConstant"
first for optimizing.
Parameters
    ----------
mod: Module
The original module.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
dataset: list of dict of Var -> NDArray
The calibration dataset.
Returns
-------
ret: Function
The graph after quantization
"""
mod = prerequisite_optimize(mod, params)
calibrate_pass = tvm.transform.module_pass(
calibrate(dataset), opt_level=1, name="QuantizeCalibrate"
)
quant_passes = [partition(), annotate(), calibrate_pass, tvm.relay.transform.InferType()]
if not current_qconfig().do_simulation:
quant_passes.append(realize())
quant_passes.append(_transform.FoldConstant())
quantize_seq = tvm.transform.Sequential(quant_passes)
with tvm.transform.PassContext(
opt_level=3, required_pass=["QuantizeAnnotate", "QuantizeCalibrate", "QuantizeRealize"]
):
with quantize_context():
mod = quantize_seq(mod)
q_cfg = current_qconfig()
assert q_cfg.partition_conversions in ["disabled", "enabled", "fully_integral"]
if q_cfg.partition_conversions != "disabled":
quantized_dtypes = {q_cfg.dtype_input, q_cfg.dtype_weight, q_cfg.dtype_activation}
ensure_fully_integral = q_cfg.partition_conversions == "fully_integral"
return partition_conversions(mod, quantized_dtypes, ensure_fully_integral)
return mod
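def _example_quantize_flow(mod, params):
    """Illustrative sketch only, not part of the original module: an assumed
    end-to-end use of this API on an existing Relay module `mod` with its
    `params` dict. With calibrate_mode="global_scale" no calibration dataset
    is needed, so `dataset` is left as None."""
    with qconfig(calibrate_mode="global_scale", global_scale=8.0,
                 weight_scale="power2", skip_conv_layers=[0]):
        qmod = quantize(mod, params=params, dataset=None)
    return qmod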
|
|
"""Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
A client for accessing Big Query.
Authentication is performed using a checked in credentials file with entries
for various environments.
Clients looking to do other forms of authentication should inherit from this
class and overwrite the __init__ method.
Code in this module is based heavily off the documented example found here:
https://developers.google.com/bigquery/loading-data-into-bigquery#storageimport
"""
__author__ = 'joemu@google.com (Joe Allan Muharsky)'
import hashlib
import json
import logging
import random
import time
import uuid
from apiclient.discovery import build_from_document
from apiclient.errors import HttpError
from perfkit.common import big_query_result_util as result_util
from perfkit.common import credentials_lib
from perfkit.common import data_source_config as config
from perfkit.common import http_util
DISCOVERY_FILE = 'config/big_query_v2_rest.json'
# TODO: Remove methods that aren't useful for Explorer (a lot of them are
# focused on the processor/import routines.
# BigQuery API Settings
SCOPE = 'https://www.googleapis.com/auth/bigquery'
DATASET_ID = 'samples_mart'
TEMP_DATASET_ID = 'samples_mart_temp'
DEFAULT_QUERY_TIMEOUT = 60
TARGET_TABLE_ID = 'results'
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
SLEEP_BETWEEN_RETRIES = 10 # In seconds
SLEEP_BETWEEN_POLLS = 5 # In seconds
# A seed value used for doing random sampling. We want to use a consistent
# seed so refreshing a graph doesn't change the graph.
RANDOM_SAMPLE_SEED = 0.8591313996314685
class BqStates(object):
"""Known BQ States."""
DONE = 'DONE'
class BqErrorMsgs(object):
"""Known BQ Error Msgs."""
ALREADY_EXISTS = 'Already Exists'
class Error(Exception):
pass
class BigQueryError(Error):
def __init__(self, message, query=None):
self.message = message
self.query = query
super(BigQueryError, self).__init__(message)
class NoTableError(Error):
def __init__(self, project_id, dataset_name, table_name):
self.project_id = project_id
self.dataset_name = dataset_name
self.table_name = table_name
message = (
'Table [{project_id}:{dataset_name}.{table_name}] '
'was not found.').format(
project_id=project_id,
dataset_name=dataset_name,
table_name=table_name)
super(NoTableError, self).__init__(message)
class BigQueryResultError(Error):
def __init__(self, message, result):
self._result = result
super(BigQueryResultError, self).__init__(message)
@property
def result(self):
return self._result
class SamplingError(Error):
pass
class BigQueryImportError(Error):
"""Exception raised when a Big Query import fails.
Attributes:
msg: The Error message.
bq_error_message: The error message returned by Big Query. This is useful
to determine things like if the import job already exists. This may
be None if the Http request failed.
"""
def __init__(self, msg, bq_error_message):
super(BigQueryImportError, self).__init__(msg)
self.bq_error_message = bq_error_message
class CredentialKeyError(Error):
pass
class BigQueryClient(object):
"""Client for interacting with BigQuery, using checked in credentials."""
# Errors to retry. List includes system errors(500, 503) and an
# authentication error(401)
RETRYABLE_ERRORS = [401, 500, 503]
def __init__(self, credential_file, env, project_id=None):
"""Obtain service account credentials and authorize HTTP connection.
Load a JSON credential file and create credentials objects.
Args:
credential_file: A file path string for the credentials JSON file.
env: A constant in data_source_config.Environments for Perfkit
(e.g, PRODUCTION or TESTING).
project_id: The project ID to use. If not provided, it will be derived
from the environment.
Raises:
      CredentialKeyError: when |env| does not exist as a key in the JSON dict.
"""
self._credential_file = credential_file
self.env = env
self.project_id = (project_id or
config.Services.GetServiceUri(
self.env, config.Services.PROJECT_ID))
self._InitializeHttp()
self._InitializeService()
def _InitializeHttp(self):
"""Sets the http handler for the client."""
self._http = credentials_lib.GetAuthorizedCredentials(
self._credential_file, self.env)
def _InitializeService(self):
"""Creates a new API service for interacting with BigQuery."""
document = None
with open(DISCOVERY_FILE, 'rb') as f:
document = f.read()
self.service = build_from_document(document, http=self._http)
@staticmethod
def BuildJobIdString(files, import_round, import_try):
"""Builds a job id string based on the files to load.
This string is designed to be informative to a human, and globally unique.
Args:
files: A list of sorted file names that will be loaded into Big Query.
The job id string contains both the first and last file in this list
and a hash of the list.
import_round: An integer that is incremented each time we reimport data.
The function of this value is to prevent duplicate job_ids when we
reimport data as job_ids must be unique.
import_try: The number of times we've tried to import this file.
Returns:
A job id string of the form
load_job_ImportRound_ImportTry_FirstFile_LastFile_HashOfFiles,
for example:
load_job_0_1_000100_000200_f23ca429
"""
hasher = hashlib.sha1()
hasher.update(''.join(files))
hashed_file_names = hasher.hexdigest()
job_id = 'load_job_%s_%s_%s_%s_%s' % (import_round, import_try, files[0],
files[-1], hashed_file_names)
# Remove characters not allowed in a job_id. Only alphanumeric, -, and _
# are allowed
job_id = job_id.replace('://', '-')
job_id = job_id.replace('/', '-')
return job_id
def _ExecuteRequestWithRetries(self, request, num_tries=5):
"""Executes a request and retries certain failures.
Failures are retried if they are in the list of RETRYABLE_ERRORS
Args:
request: The request to issue to big query. It must be an object with
an execute method.
num_tries: The number of times to attempt the request.
Returns:
The results of the request.
"""
for _ in xrange(num_tries - 1):
try:
return request.execute()
except HttpError as e:
if e.resp['status'] not in self.RETRYABLE_ERRORS:
raise
return request.execute()
def LoadData(self, source_uris, job_id=None,
source_format='NEWLINE_DELIMITED_JSON',
schema=None,
destination_dataset=DATASET_ID,
destination_table=TARGET_TABLE_ID,
write_disposition='WRITE_APPEND'):
"""Loads data into a big query table from JSON files.
Args:
source_uris: A list of uris of the files to import. Uris should be in
the form gs://bucket/object. These files should contain 1 or more
newline separated JSON files.
job_id: An id to create the load data job with. If this job_id is
already in use, the job is not created.
source_format: Specifies the format of the data. Supported values are
NEWLINE_DELIMITED_JSON and CSV.
schema: If provided, describes the schema of the target table. This
should be provided when you expect to be creating the table rather
        than appending to it (via write_disposition).
destination_dataset: The dataset that contains the target table.
destination_table: The name of the table that will receive the data.
write_disposition: Describes how to handle existing tables. The two
typical values here are
WRITE_APPEND - Add records to the table if it already exists.
WRITE_TRUNCATE - Replace the table if it exists.
"""
job_id = self.LoadDataAsync(source_uris, job_id=job_id,
source_format=source_format,
schema=schema,
destination_dataset=destination_dataset,
destination_table=destination_table,
write_disposition=write_disposition)
self.PollImportStatus(source_uris, job_id, blocking=True)
def LoadDataAsync(self, source_uris, job_id=None,
source_format='NEWLINE_DELIMITED_JSON',
schema=None,
destination_dataset=DATASET_ID,
destination_table=TARGET_TABLE_ID,
write_disposition='WRITE_APPEND'):
"""Loads data into a big query table from JSON files.
This method does not wait for the import to complete, rather it
returns a job_id that a user can poll to check the status of the import.
Args:
source_uris: A list of uris of the files to import. Uris should be in
the form gs://bucket/object. These files should contain 1 or more
newline separated JSON files.
job_id: An id to create the load data job with. If this job_id is
already in use, the job is not created.
source_format: Specifies the format of the data. Supported values are
NEWLINE_DELIMITED_JSON and CSV.
schema: If provided, describes the schema of the target table. This
should be provided when you expect to be creating the table rather
        than appending to it (via write_disposition).
destination_dataset: The dataset that contains the target table.
destination_table: The name of the table that will receive the data.
write_disposition: Describes how to handle existing tables. The two
typical values here are
WRITE_APPEND - Add records to the table if it already exists.
WRITE_TRUNCATE - Replace the table if it exists.
Returns:
The import job id, a user can poll this to see when the import task
completes.
"""
try:
job_collection = self.service.jobs()
job_data = {
'projectId': self.project_id,
'configuration': {
'load': {
'sourceUris': source_uris,
'sourceFormat': source_format,
'destinationTable': {
'projectId': self.project_id,
'datasetId': destination_dataset,
'tableId': destination_table},
'maxBadRecords': 0,
'writeDisposition': write_disposition}}}
if job_id:
job_data['jobReference'] = {'jobId': job_id}
if schema:
job_data['configuration']['load']['schema'] = schema
request = job_collection.insert(projectId=self.project_id, body=job_data)
return self._ExecuteRequestWithRetries(request)['jobReference']['jobId']
except HttpError as e:
raise BigQueryImportError(
'Importing the following data failed with the following HTTP error'
'\nerror:%s\ndata_files:%s' % (e, source_uris), str(e))
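  # Illustrative sketch (not part of the original client): the asynchronous
  # load pattern described above, pairing LoadDataAsync() with
  # PollImportStatus(). The 'client' and 'uris' names are assumed placeholders.
  #
  #   uris = ['gs://my-bucket/results-000.json']
  #   job_id = client.LoadDataAsync(uris)
  #   ... do other work ...
  #   client.PollImportStatus(uris, job_id, blocking=True)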
# TODO: Create a generic PollJobStatus that can be shared across
# big query methods.
def PollImportStatus(self, source_uris, job_id, blocking=True):
"""Checks the status of an import based on a job_id.
Args:
source_uris: A list of uris of the files to import. Uris should be in
the form gs://bucket/object. These files should contain 1 or more
newline separated JSON files.
job_id: The import job_id, used for checking the import status.
blocking: If True, wait until the import completes. If False returns
after checking the status once regardless of the import status.
Returns:
True if the status state is DONE, otherwise returns False.
"""
try:
job_collection = self.service.jobs()
first_pass = True
      # If blocking, ping for status until it is done, with a short pause
# between calls.
while blocking or first_pass:
first_pass = False
request = job_collection.get(projectId=self.project_id, jobId=job_id)
status = self._ExecuteRequestWithRetries(request)
if 'DONE' == status['status']['state']:
if 'errorResult' in status['status']:
raise BigQueryImportError(
'Importing the following data failed with the following status'
'\nstatus:%s\ndata_files:%s' %
(status, source_uris),
status['status']['errorResult']['message'])
else:
logging.info('Upload complete.')
return True
if blocking:
logging.info('Waiting for the import to complete...')
time.sleep(SLEEP_BETWEEN_RETRIES)
return False
except HttpError as e:
raise BigQueryError(
'Checking the status of an import for the following data failed '
'with the following Http error \nerror:%s\ndata_files:%s' %
(e, source_uris))
def Insert(self, body):
"""Executes an insert job, and waits for completion.
Insert jobs in BigQuery are used to execute asynchronous operations. The
type of job determines the operation taking place, and includes load (from
GZip/JSON file), query (doesn't return results, but can target a
destination table instead), extract (from BigQuery to Cloud Storage), and
copy (one table to another).
More documentation on insert jobs in BigQuery can be found at:
https://developers.google.com/bigquery/docs/reference/v2/jobs/insert
Args:
body: A JSON object describing the body of the job request. See the docs
noted above for details on supported configuration.
Returns:
The reply JSON from the BigQuery request.
Raises:
BigQueryError: If there is an errorResult in the completed job.
"""
job_collection = self.service.jobs()
logging.debug('Issuing Insert job with body: {%s}', body)
request = job_collection.insert(projectId=self.project_id,
body=body)
query_reply = self._ExecuteRequestWithRetries(request)
job_reference = query_reply['jobReference']
while query_reply['status']['state'] == 'RUNNING':
logging.debug('Waiting for job to complete...')
time.sleep(SLEEP_BETWEEN_POLLS)
request = job_collection.get(
projectId=self.project_id,
jobId=job_reference['jobId'])
query_reply = self._ExecuteRequestWithRetries(request)
if 'errorResult' in query_reply['status']:
logging.error('** BigQueryClient.Insert() failed. Response: ')
logging.error(json.dumps(query_reply, indent=4))
msg = 'Insert job failed due to {reason}:\n {message}'.format(
reason=query_reply['status']['errorResult']['reason'],
message=query_reply['status']['errorResult']['message'])
raise BigQueryError(msg)
return query_reply
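  # Illustrative sketch (not part of the original client): an assumed job body
  # for Insert(), here copying one table onto another. The configuration keys
  # follow the BigQuery v2 jobs documentation linked in the docstring above;
  # the destination table name is a placeholder.
  #
  #   body = {
  #       'configuration': {
  #           'copy': {
  #               'sourceTable': {'projectId': client.project_id,
  #                               'datasetId': DATASET_ID,
  #                               'tableId': TARGET_TABLE_ID},
  #               'destinationTable': {'projectId': client.project_id,
  #                                    'datasetId': TEMP_DATASET_ID,
  #                                    'tableId': 'results_copy'},
  #               'writeDisposition': 'WRITE_TRUNCATE'}}}
  #   reply = client.Insert(body)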
# TODO: Add support for typing the values returned in each page based
# on table schema.
def ListTableData(self, dataset_name, table_name, page_callback,
max_results_per_page=None):
"""Lists the data in a table. optionally from a specific row/index.
page_callback(response) is called for each page of data returned. For more
information on the response body for tabledata.list, see below.
https://developers.google.com/bigquery/docs/reference/v2/tabledata/list
Args:
dataset_name: The dataset that contains the table to read.
table_name: The table name to read.
page_callback: The function called for each page, with params listed
below.
reply- The reply from the last TableData.List() call.
max_results_per_page: The maximum results returned per page. Most
callers shouldn't need to set this as this method combines the
results of all the pages of data into a single response.
"""
tabledata_job = self.service.tabledata()
page_token = None
try:
while True:
reply = tabledata_job.list(projectId=self.project_id,
datasetId=dataset_name,
tableId=table_name,
pageToken=page_token,
maxResults=max_results_per_page).execute()
if 'rows' not in reply:
break
page_callback(reply)
if 'pageToken' in reply:
page_token = reply['pageToken']
else:
break
except HttpError as err:
logging.error('Error in ListTableData:\n%s', err.content)
def QueryLargeResults(self, query, page_callback, temp_dataset_name=None,
temp_table_name=None):
"""Issues a query that supports an arbitrary result size.
Normal Query jobs are limited to 128Mb, while this method allows processing
of queries of unbounded size. It fires a page_callback for each page of
    data returned. The table described by temp_dataset_name.temp_table_name is
deleted once all pages in the result set have been returned.
page_callback(response) is called for each page of data returned. For more
information on the response body for tabledata.list, see the link below.
https://developers.google.com/bigquery/docs/reference/v2/tabledata/list.
Args:
query: The query to issue.
page_callback: A function called for each page, with params listed below.
reply- The reply from the last TableData.List() call.
temp_dataset_name: The dataset that holds the query results table. If
not provided, TEMP_DATASET_ID is used.
temp_table_name: The name of the query results table. If not provided,
BQ_TEMP_{new_guid} is used as the table name.
"""
try:
temp_dataset_name = temp_dataset_name or TEMP_DATASET_ID
if not temp_table_name:
temp_table_name = 'BQ_TEMP_%s' % self.GetRandomTableName()
logging.info(
'Executing BigQuery with large materialize for project %s, query:'
'\n\n%s', self.project_id, query)
self.QueryInto(query=query,
destination_dataset=temp_dataset_name,
destination_table=temp_table_name,
write_disposition='WRITE_TRUNCATE',
allow_large_results=True)
self.ListTableData(dataset_name=temp_dataset_name,
table_name=temp_table_name,
page_callback=page_callback)
self.DeleteTable(dataset_name=temp_dataset_name,
table_name=temp_table_name)
except HttpError as err:
msg = http_util.GetHttpErrorResponse(err)
raise BigQueryError(msg, query)
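  # Illustrative usage sketch for QueryLargeResults() (assumes `client` is an
  # initialized instance and TEMP_DATASET_ID names a writable dataset):
  #
  #   def handle_page(reply):
  #     for row in reply.get('rows', []):
  #       pass  # Caller-side handling of each row goes here.
  #   client.QueryLargeResults('SELECT * FROM my_dataset.big_table', handle_page)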
def TableExists(self, dataset_name, table_name):
"""Checks for the existence of a table.
Args:
dataset_name: Dataset containing the table to check for existence.
table_name: Table to check for existence.
Returns:
True if a table was found, False if no table exists.
"""
try:
response = self.service.tables().get(projectId=self.project_id,
datasetId=dataset_name,
tableId=table_name).execute()
      return response is not None
    except HttpError as err:
      logging.error(err)
      msg = http_util.GetHttpErrorResponse(err)
      if msg.startswith('Not Found: Table '):
        return False
      else:
        logging.error(msg + '\n' + err.content)
        raise err
def DeleteTable(self, dataset_name, table_name):
"""Deletes a table if it exists.
Rather than checking for the table's existence (which would cause two
    round-trips for a successful delete), this method just suppresses any
    errors raised because the table does not exist, and returns False instead.
Args:
dataset_name: Dataset containing the table to delete.
table_name: Table to delete.
Returns:
True if a table was deleted, False if no table existed.
"""
try:
      logging.info('Deleting table %s', table_name)
self.service.tables().delete(projectId=self.project_id,
datasetId=dataset_name,
tableId=table_name).execute()
return True
    except HttpError as err:
msg = http_util.GetHttpErrorResponse(err)
if msg.startswith('Not Found: Table '):
return False
else:
logging.error(msg + '\n' + err.content)
raise err
def Query(self, query, timeout=None, max_results_per_page=None,
cache_duration=None):
"""Issues a query to Big Query and returns the response.
Note that multiple pages of data will be loaded returned as a single data
set.
Args:
query: The query to issue.
timeout: The length of time (in seconds) to wait before checking for job
completion.
max_results_per_page: The maximum results returned per page. Most
callers shouldn't need to set this as this method combines the
results of all the pages of data into a single response.
cache_duration: The number of seconds that the query should be cached.
Note this functionality is not available in the base client, but
rather from subclasses (such as GaeBigQueryClient) that
have caching implementations.
Returns:
The query results. See big query's docs for the results format:
http://goto.google.com/big_query_query_results
"""
try:
timeout_ms = (timeout or DEFAULT_QUERY_TIMEOUT) * 1000
job_collection = self.service.jobs()
query_data = {'query': query, 'timeoutMs': timeout_ms}
if max_results_per_page:
query_data['maxResults'] = max_results_per_page
logging.info('Executing BigQuery for project %s, query:\n\n%s',
self.project_id, query_data)
request = job_collection.query(projectId=self.project_id,
body=query_data)
query_reply = self._ExecuteRequestWithRetries(request)
if 'jobReference' not in query_reply:
logging.error('big_query_client.Query() failed: invalid JSON.\n'
'Query Reply:\n%s\n', query_reply)
return query_reply
job_reference = query_reply['jobReference']
if 'rows' in query_reply:
rows = query_reply['rows']
else:
rows = []
while('rows' in query_reply and
len(rows) < int(query_reply['totalRows'])):
query_reply = self._ExecuteRequestWithRetries(
job_collection.getQueryResults(
projectId=self.project_id,
jobId=job_reference['jobId'],
timeoutMs=timeout_ms,
startIndex=len(rows)))
if 'rows' in query_reply:
rows += query_reply['rows']
query_reply['rows'] = rows
result_util.ReplyFormatter.ConvertValuesToTypedData(query_reply)
return query_reply
except HttpError as err:
msg = http_util.GetHttpErrorResponse(err)
logging.error(msg)
logging.error(query)
raise BigQueryError(msg, query)
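  # Illustrative usage sketch for Query() (assumes `client` is an initialized
  # instance). All result pages are merged into the 'rows' list of the reply:
  #
  #   reply = client.Query('SELECT COUNT(*) FROM my_dataset.my_table', timeout=60)
  #   total = int(reply['totalRows'])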
def CopyTable(self,
source_table,
source_dataset,
destination_table,
destination_dataset=None,
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE'):
"""Copies the contents and schema from one table to another.
Documentation on copying tables can be found at:
https://developers.google.com/bigquery/docs/tables#copyingtable
Documentation on write/create dispositions can be found at:
https://developers.google.com/bigquery/docs/reference/v2/jobs#importing
Args:
source_table: The name of the table to copy from.
source_dataset: The dataset that contains the table to copy from.
destination_table: The name of the table to copy into.
destination_dataset: The dataset that will be copied into. If not
specified, source_dataset is used.
create_disposition: Describes the behavior to take when the table does
not already exist. Defaults to 'CREATE_IF_NEEDED'.
write_disposition: Describes the behavior to take when the table already
exists. Defaults to 'WRITE_TRUNCATE', which overwrites existing data.
Raises:
NoTableError: The source table did not exist.
Returns:
The reply JSON from the BigQuery request.
"""
destination_dataset = destination_dataset or source_dataset
if not self.TableExists(dataset_name=source_dataset,
table_name=source_table):
raise NoTableError(self.project_id, source_dataset, source_table)
job_config = {
'configuration': {'copy': {
'sourceTable': {
'projectId': self.project_id,
'datasetId': source_dataset,
'tableId': source_table
},
'destinationTable': {
'projectId': self.project_id,
'datasetId': destination_dataset,
'tableId': destination_table
},
'createDisposition': create_disposition,
'writeDisposition': write_disposition
}}}
return self.Insert(job_config)
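  # Illustrative usage sketch for CopyTable() (assumes `client` is an initialized
  # instance and that the source table exists; names are hypothetical):
  #
  #   client.CopyTable(source_table='events_raw',
  #                    source_dataset='staging',
  #                    destination_table='events',
  #                    destination_dataset='reporting')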
def QueryInto(self, query, destination_dataset, destination_table,
write_disposition, allow_large_results=False):
"""Issues a query and saves the results to a table.
Args:
query: The query to issue.
destination_dataset: The target dataset to save the results to.
destination_table: The target table to save the results to.
write_disposition: Describes the behavior to take when the table already
exists.
allow_large_results: If True, uses the large result materialization
feature in BigQuery. This is an experimental feature, and takes
longer to process than without this bit set, so it should only be
used if necessary.
Returns:
The reply JSON from the BigQuery request.
"""
try:
job_config = {
'configuration': {'query': {
'query': query,
'destinationTable': {
'projectId': self.project_id,
'datasetId': destination_dataset,
'tableId': destination_table
},
'writeDisposition': write_disposition,
'allowLargeResults': allow_large_results
}}}
return self.Insert(job_config)
except HttpError as e:
raise BigQueryError(
'Issuing the following query failed with the following Http error'
'\nerror:%s\nquery:%s' % (e, query))
@classmethod
def SampleQueryResultsMax(cls, query_results, max_results):
"""Down samples a query results so there are roughly max results.
Returns the data in the same format. See query method for the expected
format. If there are more than max_results, exactly max_results will be
returned. We use a consistent seed so calling this method multiple times
with the same args will give the same result.
Args:
query_results: The query result that you want to downsample.
      max_results: integer, The maximum number of results to return. This
        should be a non-negative integer no larger than the total number of
        rows.
    Raises:
      SamplingError: If there is conflicting information about the number of
        rows in a result, or if max_results is less than 0 or greater than
        the total number of rows.
Returns:
The downsampled results.
"""
total_results = int(query_results['totalRows'])
if total_results != len(query_results['rows']):
raise SamplingError(
'There were %s rows of results but we were expecting %s rows.' %
(len(query_results['rows']), total_results))
if max_results < 0 or max_results > total_results:
raise SamplingError(
'The max_results to return from sampling must be between 0 and the '
'total number of rows, %s, inclusive. Instead it was %s.' %
(total_results, max_results))
if total_results <= max_results:
return query_results
random.seed(RANDOM_SAMPLE_SEED)
query_results['rows'] = random.sample(query_results['rows'], max_results)
query_results['totalRows'] = len(query_results['rows'])
return query_results
@classmethod
def SampleQueryResultsFraction(cls, query_results, sample_rate):
"""Down samples a query results keep only the sample_rate fraction.
Returns the data in the same format. See query method for the expected
format. Exactly sample_rate * number of results will be returned. We use
a consistent seed so calling this method multiple times with the same args
will give the same result.
Args:
query_results: The query result that you want to downsample.
sample_rate: float, The fraction of data to keep. Should be between 0
and 1 inclusive.
Raises:
SamplingError: If sample rate is greater than 1 or less than 0
Returns:
The downsampled results.
"""
if sample_rate < 0 or sample_rate > 1:
raise SamplingError(
'The sample_rate must be between 0 and 1 inclusive. Instead it was '
'%s.' % sample_rate)
if sample_rate == 1:
return query_results
return cls.SampleQueryResultsMax(
        query_results, int(int(query_results['totalRows']) * sample_rate))
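  # Illustrative example: keep roughly half of the rows in a reply (assuming the
  # enclosing class is named BigQueryClient, as the log messages above suggest).
  # The fixed RANDOM_SAMPLE_SEED makes repeated calls return the same subset.
  #
  #   sampled = BigQueryClient.SampleQueryResultsFraction(reply, 0.5)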
def GetJobByID(self, job_id):
"""Gets a job from Big Query based on the job_id."""
request = self.service.jobs().get(jobId=job_id, projectId=self.project_id)
return self._ExecuteRequestWithRetries(request)
@classmethod
def FormatDataForTemplates(cls, source_data):
"""Reformats BigQuery data to be a formal dictionary of field/values.
BigQuery by default returns row data as a generic collection of values:
{'schema':{
'fields':[{'name':'col1'},
{'name':'col2'}]},
'rows':[{'f':[{'v':'foo'},
{'v':'bar'}]}]}
This is helpful for binding, but not templates (Django, Soy). This
function transforms rows into a formal dictionary:
{'schema':{
'fields':[{'name':'col1'},
{'name':'col2'}]},
'rows':[{'col1':'foo', 'col2':'bar'}]}
Args:
source_data: The BigQuery reply object.
Returns:
A BigQuery result, with the rows section refactored with field names.
"""
target_rows = []
fields = source_data['schema']['fields']
for source_row in source_data['rows']:
target_row = {}
ctr = 0
source_values = source_row['f']
for field in fields:
field_name = field['name']
field_value_dict = source_values[ctr]
field_value = field_value_dict['v']
target_row[field_name] = field_value
ctr += 1
target_rows.append(target_row)
source_data['rows'] = target_rows
return source_data
@classmethod
def GetRandomTableName(cls):
"""Returns a random table name, used for temp and test tables.
Tests use temp tables to verify behavior of destructive/modifying
operations, and large queries use them to store results as they are listed
back to the user. This method provides guaranteed-unique table names
suitable for use in BigQuery.
Returns:
A unique string containing numbers, letters and underscores.
"""
return str(uuid.uuid4()).replace('-', '_')
@staticmethod
def FormatQuery(select_args, from_args,
where_args=None, group_args=None, order_args=None,
row_limit=None):
"""Returns a formatted query string based on provided arguments.
Args:
select_args: A list of fields to select.
from_args: A list of qualified table names.
where_args: A list of where clauses.
group_args: A list of group by fields.
order_args: A list of order by clauses.
row_limit: The max # of rows to return.
Returns:
A string containing a formatted query for Big Query.
"""
query = []
query.append('SELECT')
select_args = ['\t' + select_arg for select_arg in select_args]
query.append(',\n'.join(select_args))
if len(from_args) == 1:
query.append('FROM ' + from_args[0])
else:
query.append('FROM')
from_args = ['\t' + from_arg for from_arg in from_args]
query.append(',\n'.join(from_args))
if where_args:
query.append('WHERE')
where_args = ['\t' + where_arg for where_arg in where_args]
query.append(' AND\n'.join(where_args))
if group_args:
query.append('GROUP BY')
group_args = ['\t' + group_arg for group_arg in group_args]
query.append(',\n'.join(group_args))
if order_args:
query.append('ORDER BY')
order_args = ['\t' + order_arg for order_arg in order_args]
query.append(',\n'.join(order_args))
if row_limit:
query.append('LIMIT {}'.format(row_limit))
return '\n'.join(query)
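  # Illustrative example of the query string FormatQuery() builds (class name per
  # the log messages above; table/column names are hypothetical):
  #
  #   BigQueryClient.FormatQuery(['name', 'age'], ['my_dataset.people'],
  #                              where_args=['age > 21'], row_limit=10)
  #   # Returns (with tab indentation):
  #   # SELECT
  #   #         name,
  #   #         age
  #   # FROM my_dataset.people
  #   # WHERE
  #   #         age > 21
  #   # LIMIT 10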
@staticmethod
def HasCache():
"""Returns false as the base client doesn't have a cache."""
return False
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cql3handling import simple_cql_types
class CQLHelpTopics(object):
def get_help_topics(self):
return [ t[5:] for t in dir(self) if t.startswith('help_') ]
def print_help_topic(self, topic):
getattr(self, 'help_' + topic.lower())()
def help_types(self):
print "\n CQL types recognized by this version of cqlsh:\n"
for t in simple_cql_types:
print ' ' + t
print """
For information on the various recognizable input formats for these
types, or on controlling the formatting of cqlsh query output, see
one of the following topics:
HELP TIMESTAMP_INPUT
HELP BLOB_INPUT
HELP UUID_INPUT
HELP BOOLEAN_INPUT
HELP TEXT_OUTPUT
HELP TIMESTAMP_OUTPUT
"""
def help_timestamp_input(self):
print """
Timestamp input
CQL supports any of the following ISO 8601 formats for timestamp
specification:
yyyy-mm-dd HH:mm
yyyy-mm-dd HH:mm:ss
yyyy-mm-dd HH:mmZ
yyyy-mm-dd HH:mm:ssZ
yyyy-mm-dd'T'HH:mm
yyyy-mm-dd'T'HH:mmZ
yyyy-mm-dd'T'HH:mm:ss
yyyy-mm-dd'T'HH:mm:ssZ
yyyy-mm-dd
yyyy-mm-ddZ
The Z in these formats refers to an RFC-822 4-digit time zone,
expressing the time zone's difference from UTC. For example, a
timestamp in Pacific Standard Time might be given thus:
2012-01-20 16:14:12-0800
If no time zone is supplied, the current time zone for the Cassandra
server node will be used.
"""
def help_blob_input(self):
print """
Blob input
          CQL blob data must be specified in a string literal as hexadecimal
data. Example: to store the ASCII values for the characters in the
string "CQL", use '43514c'.
"""
def help_uuid_input(self):
print """
UUID input
          UUIDs may be specified in CQL using 32 hexadecimal characters,
split up using dashes in the standard UUID format:
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
"""
def help_boolean_input(self):
print """
Boolean input
CQL accepts the strings 'true' and 'false' (case insensitive)
as input for boolean types.
"""
def help_timestamp_output(self):
print """
Timestamp output
Cqlsh will display timestamps in the following format by default:
yyyy-mm-dd HH:mm:ssZ
which is a format acceptable as CQL timestamp input as well.
The output format can be changed by setting 'time_format' property
in the [ui] section of .cqlshrc file.
"""
def help_text_output(self):
print """
Textual output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
          types, they will be shown as backslash escapes. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
help_ascii_output = help_text_output
def help_create_index(self):
print """
CREATE INDEX [<indexname>] ON <cfname> ( <colname> );
A CREATE INDEX statement is used to create a new, automatic secondary
index on the given CQL table, for the named column. A name for the
index itself can be specified before the ON keyword, if desired. A
single column name must be specified inside the parentheses. It is not
necessary for the column to exist on any current rows (Cassandra is
schema-optional), but the column must already have a type (specified
during the CREATE TABLE, or added afterwards with ALTER TABLE).
"""
def help_drop(self):
print """
There are different variants of DROP. For more information, see
one of the following:
HELP DROP_KEYSPACE;
HELP DROP_TABLE;
HELP DROP_INDEX;
"""
def help_drop_keyspace(self):
print """
DROP KEYSPACE <keyspacename>;
A DROP KEYSPACE statement results in the immediate, irreversible
removal of a keyspace, including all column families in it, and all
data contained in those column families.
"""
def help_drop_table(self):
print """
DROP TABLE <tablename>;
A DROP TABLE statement results in the immediate, irreversible
removal of a CQL table and the underlying column family, including all
data contained in it.
"""
help_drop_columnfamily = help_drop_table
def help_drop_index(self):
print """
DROP INDEX <indexname>;
A DROP INDEX statement is used to drop an existing secondary index.
"""
def help_truncate(self):
print """
TRUNCATE <tablename>;
TRUNCATE accepts a single argument for the table name, and permanently
removes all data from it.
"""
def help_create(self):
print """
There are different variants of CREATE. For more information, see
one of the following:
HELP CREATE_KEYSPACE;
HELP CREATE_TABLE;
HELP CREATE_INDEX;
"""
def help_use(self):
print """
USE <keyspacename>;
Tells cqlsh and the connected Cassandra instance that you will be
working in the given keyspace. All subsequent operations on tables
or indexes will be in the context of this keyspace, unless otherwise
specified, until another USE command is issued or the connection
terminates.
As always, when a keyspace name does not work as a normal identifier or
number, it can be quoted using single quotes (CQL 2) or double quotes
(CQL 3).
"""
def help_create_table(self):
print """
CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [,
<colname> <type> [, ...]] )
[WITH <optionname> = <val> [AND <optionname> = <val> [...]]];
CREATE TABLE statements create a new CQL table under the current
keyspace. Valid table names are strings of alphanumeric characters and
underscores, which begin with a letter.
Each table requires a primary key, which will correspond to the
underlying columnfamily key and key validator. It's important to
note that the key type you use must be compatible with the partitioner
in use. For example, OrderPreservingPartitioner and
CollatingOrderPreservingPartitioner both require UTF-8 keys.
In cql3 mode, a table can have multiple columns composing the primary
key (see HELP COMPOUND_PRIMARY_KEYS).
For more information, see one of the following:
HELP CREATE_TABLE_TYPES;
HELP CREATE_TABLE_OPTIONS;
"""
help_create_columnfamily = help_create_table
def help_compound_primary_keys(self):
print """
        CREATE TABLE <cfname> ( <partition_key> <type>, <clustering_key1> <type>, <clustering_key2> <type>
            [, ...], PRIMARY KEY (<partition_key>, <clustering_key1>, <clustering_key2>) );
CREATE TABLE allows a primary key composed of multiple columns. When this is the case, specify
the columns that take part in the compound key after all columns have been specified.
, PRIMARY KEY( <key1>, <key2>, ... )
The partitioning key itself can be a compound key, in which case the first element of the PRIMARY KEY
phrase should be parenthesized, as
PRIMARY KEY ((<partition_key_part1>, <partition_key_part2>), <clustering_key>)
"""
def help_create_table_types(self):
print """
CREATE TABLE: Specifying column types
CREATE ... (KEY <type> PRIMARY KEY,
othercol <type>) ...
It is possible to assign columns a type during table creation. Columns
configured with a type are validated accordingly when a write occurs,
and intelligent CQL drivers and interfaces will be able to decode the
column values correctly when receiving them. Column types are specified
as a parenthesized, comma-separated list of column term and type pairs.
See HELP TYPES; for the list of recognized types.
"""
help_create_columnfamily_types = help_create_table_types
def help_create_table_options(self):
print """
CREATE TABLE: Specifying columnfamily options
CREATE TABLE blah (...)
WITH optionname = val AND otheroption = val2;
A number of optional keyword arguments can be supplied to control the
configuration of a new CQL table, such as the size of the associated
row and key caches for the underlying Cassandra columnfamily. Consult
your CQL reference for the complete list of options and possible
values.
"""
help_create_columnfamily_options = help_create_table_options
def help_alter_alter(self):
print """
ALTER TABLE: altering existing typed columns
ALTER TABLE addamsFamily ALTER lastKnownLocation TYPE uuid;
ALTER TABLE ... ALTER changes the expected storage type for a column.
The column must already have a type in the column family metadata. The
column may or may not already exist in current rows-- but be aware that
no validation of existing data is done. The bytes stored in values for
that column will remain unchanged, and if existing data is not
deserializable according to the new type, this may cause your CQL
driver or interface to report errors.
"""
def help_alter_add(self):
print """
ALTER TABLE: adding a typed column
ALTER TABLE addamsFamily ADD gravesite varchar;
The ALTER TABLE ... ADD variant adds a typed column to a column
family. The column must not already have a type in the column family
metadata. See the warnings on HELP ALTER_ALTER regarding the lack of
validation of existing data; they apply here as well.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Note that this does _not_ remove the
column from current rows; it just removes the metadata saying that the
bytes stored under that column are expected to be deserializable
according to a certain type.
"""
def help_alter_with(self):
print """
ALTER TABLE: changing column family properties
ALTER TABLE addamsFamily WITH comment = 'Glad to be here!'
AND read_repair_chance = 0.2;
An ALTER TABLE ... WITH statement makes adjustments to the
table properties, as defined when the table was created (see
HELP CREATE_TABLE_OPTIONS and your Cassandra documentation for
information about the supported parameter names and values).
"""
def help_delete_columns(self):
print """
DELETE: specifying columns
DELETE col1, col2, col3 FROM ...
Following the DELETE keyword is an optional comma-delimited list of
column name terms. When no column names are given, the remove applies
to the entire row(s) matched by the WHERE clause.
When column names do not parse as valid CQL identifiers, they can be
quoted in single quotes (CQL 2) or double quotes (CQL 3).
"""
def help_delete_where(self):
print """
DELETE: specifying rows
DELETE ... WHERE keycol = 'some_key_value';
DELETE ... WHERE keycol1 = 'val1' AND keycol2 = 'val2';
DELETE ... WHERE keycol IN (key1, key2);
The WHERE clause is used to determine to which row(s) a DELETE
applies. The first form allows the specification of a precise row
by specifying a particular primary key value (if the primary key has
multiple columns, values for each must be given). The second form
allows a list of key values to be specified using the IN operator
and a parenthesized list of comma-delimited key values.
"""
def help_update_set(self):
print """
UPDATE: Specifying Columns and Row
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> = keyname;
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> IN ('<key1>', '<key2>', ...)
Rows are created or updated by supplying column names and values in
term assignment format. Multiple columns can be set by separating the
name/value pairs using commas.
"""
def help_update_counters(self):
print """
UPDATE: Updating Counter Columns
UPDATE ... SET name1 = name1 + <value> ...
UPDATE ... SET name1 = name1 - <value> ...
Counter columns can be incremented or decremented by an arbitrary
numeric value though the assignment of an expression that adds or
subtracts the value.
"""
def help_update_where(self):
print """
UPDATE: Selecting rows to update
UPDATE ... WHERE <keyname> = <keyval>;
UPDATE ... WHERE <keyname> IN (<keyval1>, <keyval2>, ...);
UPDATE ... WHERE <keycol1> = <keyval1> AND <keycol2> = <keyval2>;
Each update statement requires a precise set of keys to be specified
using a WHERE clause.
If the table's primary key consists of multiple columns, an explicit
value must be given for each for the UPDATE statement to make sense.
"""
def help_select_table(self):
print """
SELECT: Specifying Table
SELECT ... FROM [<keyspace>.]<tablename> ...
The FROM clause is used to specify the CQL table applicable to a SELECT
query. The keyspace in which the table exists can optionally be
specified along with the table name, separated by a dot (.). This will
not change the current keyspace of the session (see HELP USE).
"""
help_select_columnfamily = help_select_table
def help_select_where(self):
print """
SELECT: Filtering rows
SELECT ... WHERE <key> = keyname AND name1 = value1
          SELECT ... WHERE <key> >= startkey AND <key> <= endkey AND name1 = value1
SELECT ... WHERE <key> IN ('<key>', '<key>', '<key>', ...)
The WHERE clause provides for filtering the rows that appear in
results. The clause can filter on a key name, or range of keys, and in
the case of indexed columns, on column values. Key filters are
specified using the KEY keyword or key alias name, a relational
operator (one of =, >, >=, <, and <=), and a term value. When terms
appear on both sides of a relational operator it is assumed the filter
applies to an indexed column. With column index filters, the term on
the left of the operator is the name, the term on the right is the
value to filter _on_.
Note: The greater-than and less-than operators (> and <) result in key
ranges that are inclusive of the terms. There is no supported notion of
"strictly" greater-than or less-than; these operators are merely
supported as aliases to >= and <=.
"""
def help_select_limit(self):
print """
SELECT: Limiting results
SELECT ... WHERE <clause> [LIMIT n] ...
Limiting the number of rows returned can be achieved by adding the
LIMIT option to a SELECT expression. LIMIT defaults to 10,000 when left
unset.
"""
class CQL3HelpTopics(CQLHelpTopics):
def help_create_keyspace(self):
print """
CREATE KEYSPACE <ksname>
WITH replication = {'class':'<strategy>' [,'<option>':<val>]};
The CREATE KEYSPACE statement creates a new top-level namespace (aka
"keyspace"). Valid names are any string constructed of alphanumeric
characters and underscores. Names which do not work as valid
identifiers or integers should be quoted as string literals. Properties
such as replication strategy and count are specified during creation
as key-value pairs in the 'replication' map:
class [required]: The name of the replication strategy class
which should be used for the new keyspace. Some often-used classes
are SimpleStrategy and NetworkTopologyStrategy.
other options [optional]: Most strategies require additional arguments
which can be supplied as key-value pairs in the 'replication' map.
Examples:
To create a keyspace with NetworkTopologyStrategy and strategy option of "DC1"
with a value of "1" and "DC2" with a value of "2" you would use
the following statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'NetworkTopologyStrategy', 'DC1':1, 'DC2':2};
To create a keyspace with SimpleStrategy and "replication_factor" option
with a value of "3" you would use this statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};
"""
def help_begin(self):
print """
BEGIN [UNLOGGED|COUNTER] BATCH [USING TIMESTAMP <timestamp>]
<insert or update or delete statement> ;
[ <another insert or update or delete statement ;
[...]]
APPLY BATCH;
BATCH supports setting a client-supplied optional global timestamp
which will be used for each of the operations included in the batch.
Only data modification statements (specifically, UPDATE, INSERT,
and DELETE) are allowed in a BATCH statement. BATCH is _not_ an
analogue for SQL transactions.
_NOTE: Counter mutations are allowed only within COUNTER batches._
_NOTE: While there are no isolation guarantees, UPDATE queries are
atomic within a given record._
"""
help_apply = help_begin
def help_select(self):
print """
SELECT <selectExpr>
FROM [<keyspace>.]<table>
[WHERE <clause>]
[ORDER BY <colname> [DESC]]
[LIMIT m];
SELECT is used to read one or more records from a CQL table. It returns
a set of rows matching the selection criteria specified.
For more information, see one of the following:
HELP SELECT_EXPR
HELP SELECT_TABLE
HELP SELECT_WHERE
HELP SELECT_LIMIT
"""
def help_delete(self):
print """
        DELETE [<col1> [, <col2>, ...]] FROM [<keyspace>.]<tablename>
[USING TIMESTAMP <timestamp>]
WHERE <keyname> = <keyvalue>;
A DELETE is used to perform the removal of one or more columns from one
or more rows. Each DELETE statement requires a precise set of row keys
to be specified using a WHERE clause and the KEY keyword or key alias.
For more information, see one of the following:
HELP DELETE_USING
HELP DELETE_COLUMNS
HELP DELETE_WHERE
"""
def help_delete_using(self):
print """
DELETE: the USING clause
DELETE ... USING TIMESTAMP <timestamp>;
<timestamp> defines the optional timestamp for the new tombstone
record. It must be an integer. Cassandra timestamps are generally
specified using milliseconds since the Unix epoch (1970-01-01 00:00:00
UTC).
"""
def help_update(self):
print """
UPDATE [<keyspace>.]<columnFamily>
[USING [TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]]
SET name1 = value1, name2 = value2 WHERE <keycol> = keyval
[IF EXISTS];
An UPDATE is used to write one or more columns to a record in a table.
No results are returned. The record's primary key must be completely
and uniquely specified; that is, if the primary key includes multiple
columns, all must be explicitly given in the WHERE clause.
Statements begin with the UPDATE keyword followed by the name of the
table to be updated.
For more information, see one of the following:
HELP UPDATE_USING
HELP UPDATE_SET
HELP UPDATE_COUNTERS
HELP UPDATE_WHERE
"""
def help_update_using(self):
print """
UPDATE: the USING clause
UPDATE ... USING TIMESTAMP <timestamp>;
UPDATE ... USING TTL <timeToLive>;
The USING clause allows setting of certain query and data parameters.
If multiple parameters need to be set, these may be joined using AND.
Example:
UPDATE ... USING TTL 43200 AND TIMESTAMP 1351620509603
<timestamp> defines the optional timestamp for the new column value(s).
It must be an integer. Cassandra timestamps are generally specified
using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC).
<timeToLive> defines the optional time to live (TTL) in seconds for the
new column value(s). It must be an integer.
"""
def help_insert(self):
print """
INSERT INTO [<keyspace>.]<tablename>
( <colname1>, <colname2> [, <colname3> [, ...]] )
VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] )
[USING TIMESTAMP <timestamp>]
[AND TTL <timeToLive>];
An INSERT is used to write one or more columns to a record in a
CQL table. No results are returned.
Values for all component columns in the table's primary key must
be given. Also, there must be at least one non-primary-key column
specified (Cassandra rows are not considered to exist with only
a key and no associated columns).
Unlike in SQL, the semantics of INSERT and UPDATE are identical.
In either case a record is created if none existed before, and
          updated when it does. For more information, see one of the
following:
HELP UPDATE
HELP UPDATE_USING
"""
def help_select_expr(self):
print """
SELECT: Specifying Columns
SELECT name1, name2, name3 FROM ...
SELECT COUNT(*) FROM ...
The SELECT expression determines which columns will appear in the
results and takes the form of a comma separated list of names.
It is worth noting that unlike the projection in a SQL SELECT, there is
no guarantee that the results will contain all of the columns
specified. This is because Cassandra is schema-less and there are no
guarantees that a given column exists.
When the COUNT aggregate function is specified as a column to fetch, a
single row will be returned, with a single column named "count" whose
value is the number of rows from the pre-aggregation resultset.
Currently, COUNT is the only function supported by CQL.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Dropped columns will immediately
become unavailable in the queries and will not be included in
compacted sstables in the future. If a column is readded, queries
won't return values written before the column was last dropped.
It is assumed that timestamps represent actual time, so if this
is not your case, you should NOT readd previously dropped columns.
Columns can't be dropped from tables defined with COMPACT STORAGE.
"""
def help_create(self):
super(CQL3HelpTopics, self).help_create()
print " HELP CREATE_USER;\n"
def help_alter(self):
print """
ALTER TABLE <tablename> ALTER <columnname> TYPE <type>;
ALTER TABLE <tablename> ADD <columnname> <type>;
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
ALTER TABLE <tablename> WITH <optionname> = <val> [AND <optionname> = <val> [...]];
An ALTER statement is used to manipulate table metadata. It allows you
to add new typed columns, drop existing columns, change the data
storage type of existing columns, or change table properties.
No results are returned.
See one of the following for more information:
HELP ALTER_ALTER;
HELP ALTER_ADD;
HELP ALTER_DROP;
HELP ALTER_RENAME;
HELP ALTER_WITH;
"""
def help_alter_rename(self):
print """
ALTER TABLE: renaming a column
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
The ALTER TABLE ... RENAME variant renames a typed column in a column
family.
"""
def help_drop(self):
super(CQL3HelpTopics, self).help_drop()
print " HELP DROP_USER;\n"
def help_list(self):
print """
There are different variants of LIST. For more information, see
one of the following:
HELP LIST_USERS;
HELP LIST_PERMISSIONS;
"""
def help_create_user(self):
print """
CREATE USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
CREATE USER creates a new Cassandra user account.
Only superusers can issue CREATE USER requests.
To create a superuser account use SUPERUSER option (NOSUPERUSER is the default).
WITH PASSWORD clause should only be used with password-based authenticators,
e.g. PasswordAuthenticator, SimpleAuthenticator.
"""
def help_alter_user(self):
print """
ALTER USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
Use ALTER USER to change a user's superuser status and/or password (only
with password-based authenticators).
Superusers can change a user's password or superuser status (except their own).
Users cannot change their own superuser status. Ordinary users can only change their
password (if the configured authenticator is password-based).
"""
def help_drop_user(self):
print """
DROP USER <username>;
DROP USER removes an existing user. You have to be logged in as a superuser
to issue a DROP USER statement. A user cannot drop themselves.
"""
def help_list_users(self):
print """
LIST USERS;
List existing users and their superuser status.
"""
def help_grant(self):
print """
GRANT (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
TO <username>
Grant the specified permission (or all permissions) on a resource
to a user.
To be able to grant a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_revoke(self):
print """
REVOKE (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
FROM <username>
Revokes the specified permission (or all permissions) on a resource
from a user.
To be able to revoke a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_list_permissions(self):
print """
LIST (<permission> [PERMISSION] | ALL [PERMISSIONS])
[ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>]
[OF <username>]
[NORECURSIVE]
Omitting ON <resource> part will list permissions on ALL KEYSPACES,
every keyspace and table.
Omitting OF <username> part will list permissions of all users.
Omitting NORECURSIVE specifier will list permissions of the resource
and all its parents (table, table's keyspace and ALL KEYSPACES).
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_permissions(self):
print """
PERMISSIONS
Cassandra has 6 permissions:
          ALTER: required for ALTER KEYSPACE, ALTER TABLE, CREATE INDEX, DROP INDEX
AUTHORIZE: required for GRANT, REVOKE
CREATE: required for CREATE KEYSPACE, CREATE TABLE
DROP: required for DROP KEYSPACE, DROP TABLE
MODIFY: required for INSERT, DELETE, UPDATE, TRUNCATE
SELECT: required for SELECT
"""
|
|
#!/usr/bin/env python
#
# DNATool - A program for DNA sequence manipulation
# Copyright (C) 2012- Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Email: farrell.damien_at_gmail.com
"""Images for DNATool"""
import Tkinter as tk
def logo():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhAAEAAcZAAAAAAAABAAcOBwsVCw0aDQ4bDhAfEBYbLBMkExYpFkYe'
+'Hi0pDEshIU8jIykzUyA+ICw3WSNDIy46XyZIJnAxMShNKCtSK1RMFlpSGEJS'
+'hpVBQZZCQmBXGZpERJ9GRjZnNjdpNzlsOalKSjtxOztyOz52PkB7QFdtslhu'
+'s1pxuF10vl11vod7JEuPS2N8ykuQS0yRTE6VTk6WTlCaUFGaUVGbUVOfU1Sg'
+'VFWjVVakVrSjL7WkMLqpMcCuM8GuM8y5Nv//////////////////////////'
+'////////////////////////////////////////////////////////////'
+'////////////////////////////////////////////////////////////'
+'////////////////////////////////////////////////////////////'
+'/////////////////////////////////////////////////yH+EUNyZWF0'
+'ZWQgd2l0aCBHSU1QACH5BAEKAEAALAAAAAAAAQABAAf+gECCg4SFhoeIiYqL'
+'jI2Oj5CRkpOUlZaXmJmam5ydnp+VAKKjpKWmp6ipqqusra2gsLGykq61tre4'
+'uaSbALO+v4S9iLrExca3vMfKy8yuic3Q0bbJ0tXWuc/X2tbU297fo9ng48Xd'
+'5OfR4ujrr5rs78vq8PO77vT3yMP4+Ob7/qfy/q3rJ7BgQFIHIChcyLChw4cQ'
+'I0qcSJHhgVQEC/47OAoCio8gQ4ocSbKkyZMoU4aEgNGexpccRXlUSbOmzZsj'
+'WaLKKEqEh59AgwodSrSo0aNIkyoNKkJVTAAzcUqdSvWjToAuT3nYwLWr169g'
+'w4odS7as2bNePTjVhypq1bf+cE9eNcUTwFa0ePPq3YtWbctDqtzGHUx4bqm6'
+'d/kqXsy4rN+dbE8JJky5quF6mVQlbsy5s+LHWAGnmly59M3L4bKa2uy5tWuy'
+'oOlGNkXatO2UqEUhfs2799fYh2eXqn27OMncwjKnYu27eWfgmA0FNk4dJfLd'
+'zrM/Xyu6bfXvx/9i0qy9PGPoqbtLBs8e5HXVpZibn28WvW7hpIi3t/1eOSr5'
+'9AUYln3JSTfafuz1N95yAjYIG3cGeofgdwpeQp6DGP4GYSHTTVhdhZZcmOGI'
+'BD6ln4eFiWchgyOSuGEwB6JoHIihsNgihiXi15GMM6oYoo03NpijerTxWByN'
+'lIj+GKSAQ0a4npH8+Vjjf0s62CSHMUJZGpKTKFnlfFfCKKGWlXFJC5Bfmhfm'
+'IB2SSZmZkXiZpnZrCtKmm4PBCYmcczpXJxB34gmXno/w2advfwYqqGVSJonm'
+'oc0lmuWibxHqiKGQvibpmJQyCpl/WmVK54tsTopgCiqkqqqqK5xQaaNdPiqq'
+'pqTaaep+DqiywqufLkjlrJHWCuit7SEHgAq8huZrqMAiKqyixaqCrKfKrvhr'
+'s7xt+qSHxk5LlaWNYIrtec8Sm6C0ycoG6mrjZlsupwh2m25w68bXLq2wQntu'
+'Kt5OBS4j4t67l7ZFoigvteouy67AnhE8nIwHfwvrmdf+MtyYw/lBjC7C9Cps'
+'r8WcYbyjwRtL3Ku1zIJMbr7mghexvxPHKavKeYksk8b8zhsdygvT/Nm72074'
+'slT/LhKwzw+yDO9+xkpQkUQXxVzozEjXB3TB3Gp0KdVVJ32ymEHHq3W4XHct'
+'ls1Q4SzQ1qj4tNRRGvTWwdt0D9WU0mEzPTbA2gDYmALzmKj2RmRf4zdjDASu'
+'480kG1S4NYcvljg8gje+9uPVRK7Y5O9UnrXjfBveG+fseC703kb3PbriRD5s'
+'OeGhQ7465Yun/bopLqyq++689667C8JiKTpvpA9U+4nUDV2aCsGDnfnsnR8/'
+'uCn9msa81LYO/1rx6Jguds7+xV3/9TAef/waBQykr/76DDSwynje6w3+beJX'
+'K8vRvf0JX8a3l1L98s3rBP7chb0fLS1a87NN/RI2iwHia3yc0JfLSqbAAEaw'
+'bM4q4JTyhkBU/K8yC+xYAzGYPwsakIP78qBxQriz+5GQgBCsC/J6lEDrmXB/'
+'o9BcCTXoqAOm8BQfpAwL0+MLB7pGf/Xi3+dUGL4bJjGHQnJirHw4wRoCkIfl'
+'gyKTpEgxFFaRifTj4gl7Rh8kZpFxSwTiCsW4QTKCiY1ToyKFKGhDLPLMfGWE'
+'I9u8OEcrglCPXXSjmgAZO6ydzo9CJOSeXvhA+wmwZX0EYwXtOEY8vpGSi5Tj'
+'h+j+eMUYfsKIrTHjHZU4oVylYldNxGQPU5ZHVe7RkKfyXavW6MpAWnKQtSyk'
+'6zqVSk96ApQNU6QiJMjLwQzxPr8A5nZymTpNFvMtxyzQCCt2SV+e0XbPrKM1'
+'L0hNXG6zkqTMZiKZGUdWVtOR3OSjOKkSTWAMq5vlESU4R7bOcX4Th6LQIQzR'
+'KcN6/pGcr7xlPIUZP39WpZ3AUGbICCq9YqbAARHJADsZqktS6LORDPwlJLUU'
+'zRmWBKHJZOQRKeokWC6qoxMFKOYEOiqVti6cJyUmSkBaRJGGkqTCUyeUUDoV'
+'mk7TnN7kJz6x2SmeSsWnLoRnS++5SpMKyqg4QWosFHr+MZw6b5dFlelJpAoL'
+'qq6MqVPUqZGgehOugsKri5FnG7FKKbLaxKyftGkwXVpStsZ0o1u1au0uOlK6'
+'5tSpeHJrTeCqUaVmR61NLYUEfMfYxjr2sbsDHl5NQthHGtZPes3eS4rhUZJU'
+'Np2CHKhfr7pZXHR2JJ+tSwPYx9rWuva1rnXfaEtVWmKcViSpHerlulpb26YU'
+'rKOEyVR7q4vbhiS3T0RdXImLi4Q8zSESyGxdN5tU5tLDndatrnVpF1Liane7'
+'pUsoc78L3u6J17tTdedyyYHd8ar3vcHdxnndC9/6yuwc9s2vfverW27w978A'
+'DvBTriHgAhv4v9I8sIIXzOD+Bjv4wRCOsIQnTOEKW/jCGIZwgjNcxJeW17wV'
+'/TB7pyvicZSzxPglMYq9ceIVg8PDLtZGi2MsXxXTuBozvrF//6pjAge0xzi2'
+'MZCZkeMhNwPGRo7Hj5NMZCEzuRxLfvIxkCxlYhS5yla2MQImwOUue/nLYA6z'
+'mMdM5jKb+cxersANVfGDHrj5zXCOs5znTOc62/nOeM4znH9AKlVMAAaADrSg'
+'B03oQhv60IhOtKIXLegZrDkVPdiBpCdN6Upb+tKYzrSmN83pTlO6B31OxZ8Z'
+'TepSm/rUjHa0BlURaU+7+tWwjrWnQS0lP6P61rjO9aJVfU9Wy/rXwA52p2k9'
+'Plv+6/rYyM41r4XKY1K0WtjQjjawie1IYyf72thW9LIz+pRnS/vb4N40tblt'
+'7Wyb+9yA3rYIhwnpcLv73ZUe97rfeYpRo/ve2FZ3C7sN7367W977Lje+B67s'
+'R6PC2/5OeLABTkTa1pvgEC/4qtut8IpPO9SosHfEN05qfTec3Qe3uMhjzXBk'
+'knYUGue4yhHtcZM38xQIH7nMM13yDQt85TgfdMs3XLuYz/znn8b4w3NOdJ0b'
+'HOZAT7qla47klBc95zu/ss+VPnOm29jpT1951KMMgKlTfeRWbzbKs/70ra/U'
+'2V+nethPLgqsk33jZg+xKLye9oqv3eGmcPvbIR73l5v+gu51T/jdNZv3vUP9'
+'6H8PPNAHT+/CG17riC8F4BUPb8bf/PEE7zvIkU55mVte1JhXueb53XmwC93x'
+'EX9BDV5w6BjEQPSRR3vpRf75jG/8BSUAQAJoUGgaEMAAI2B9xEff89nT/vSl'
+'0Hu2cT8KAvA+0C+4QQFGUQLhZz72o5i88aVd+6EPnPmkAD7rwU+K6vMd+3Pf'
+'vt2RTwrlJ5v8pShBC3J/ihK8fuDER7L21S/s7qMe3fBnChagCs6Hf+jXdfwn'
+'eOw3dvcWgLhQgPeWfza2fwn4a/6XfA1If8YAgecmgWKXfsbHAxeAASRYgiXI'
+'AtF2ge13bzIQAcpgAB8QgQf+SIE/V3MYkIIL2Hb45oC3wIEdOIPbZ4M4WGug'
+'t4Ma+IDPJ4MTF3LGJ4TQpoIMuIM2QAC4YH/Xt4ScN3tO2H85CADud22+Zwvm'
+'d4W9RnFaqAo3+IRd+IXvZwK1gAAzMHxA2IRoOITFVoTfd4St4INKWIZMeIap'
+'kIZcSIS2l4e5wIc/iIWJR4eBaIfVhocAqIe2gIjZ5oFsh4CMiAqCuHBraIQb'
+'mISJ6IdZWHpbyImE6H3mxoNI2IfMdok0WHV1qIan+H/ZJgMDsAwhwIrcVnyZ'
+'eAqbeHGziIH3FobGMIah2Ip4J3lBGIuDeIeFOIxUiAoEkAPRiArGeIy7qH/L'
+'2Ij+suiMqIhuxFgKBRiOpCAAJGB96GaJySh7gIgKC2CC8BiP8liCHNCJBEeO'
+'AMCBNmAKzoeO6TiH7Wg8H+iFEReOPgh/lIiN88aLAQlil8iGyeZ7AYCI4JeQ'
+'CrlvDEmK3PWQHEcDDwCKgvYCIwAC9yeHiqiMveiQ6xiFoXd+J8mOGhk9AwmR'
+'LYls6kh4KNmQKTaTNWmSoriIOjliPNmTZIiMOAmTnVdzLHZ1RFmU2TiB2xhe'
+'Q9mUuriQ2piSpfCO87iV8liPwbiCVFmVGHmVQTkKv2iB9hiWF/lxpFeWonCW'
+'sgaFOqiWa+lymweUMamJjkhukEiX13aTjZeTeemLezn+b5fnl7oGmL6GlaQA'
+'lySXloiZbIpphoNpCo4Ja3JJkJH5lwBZmaVwma+WmTS5mdrWmUnJjKbojbSI'
+'bS9AAzPwmrAZm7I5m7JZkuY2mX/omY1ZmAHXl9hGA8wQAWLJlhl5mtzYjI/4'
+'jLfJDKNparg5isapl92YnN+Yb8w5nHbZlrpplrzJlodpk9f5jy+ZfVFJmNPJ'
+'l8pZieFZlzxHlqXnA6pwAd2Znd95bDt3C81Zas+Jl52nAz5QZzwwnzbnm5y5'
+'DPnZcaZZgeAmmtiJCweaagmqoNwHmeppoA0qdRL6bgwqnsrwoLsWoRmKnOhZ'
+'ndcWAxEQZgigChUQZrnIoUb+GZhIGaLnaZgEOnAToApxyHH7KZgyOqO9mZ4Q'
+'d6OpkKNwB6I9GpcUGqQ4CnvjCYJH6qPeWaP4JqSoQKQ++aKL+aQiSqNASnBU'
+'egpW6pI/yaNaiqRfyZIR96WmEKZOaZVQWaapSZ2r6aVLqqNGCqfDlqR0OqRM'
+'OqYxiqeutqEcp6alwKYG2KSYCKiPeaZzOah1WqSI+oqKKmmCunGESgqGim87'
+'+qeTKm56aqOPeqVPOZCSOqmVmqahKqZYSpmdmqeMqpmOyqd2GqmtGpqfOqWp'
+'2qZj+aa16qqqKYyxWqV9uqq52auadqpKKquQ6qfkaaychqx7KqyzyqxO6qw0'
+'d6vO93apo5CpF8p1paqo0Aqqyiqqbkqq1uqpr+qhuKatosCtLjqqrniux4qt'
+'6MauAOCu7Imh8opp4Yqr46qq8LqS1bqvQZeuK2ev+LqctEqwS9eFFUCbEBux'
+'EguxapYKODCxGJuxsokDB8gBXPmxIBuyIjuyJGuCXvmrWGYMV5ayuEBlLDsN'
+'XPeyMDuQMlsLK1uz7UCzOMsKN7uzXeiz7xOzQPuqQ4uoRYuyR3uASTuiSxu0'
+'Z9e0cgq11Cq1UUq1Rku1gMFhWru1XNu1XhtggQAAOw==')
return img
def openproject():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAIcAAAAAANiGLNiHLdiILdmOM9qUNdqaNtqhNtqmN9qrNdqq'
+'NtqvNdqzNtm0NNu2POGuT+GoUeOuW+SvYPDLJvHNLvLQOeHCXuLDX/LTR/PT'
+'R/PWVPTXVPTXV+LDYOnAYOrFfunTbu3Xd/HPafHQbPXaYfXaYvXaY/bdbfbd'
+'b/HScvLTefLUf/becvLdf/ffeffgeoaGhoyMjJOTk5qamp6enqKioqWlpaio'
+'qKurq62trbOwqLCwsLGxsLKysrS0tLW1tba2tre3t7i4uLm5ubq6uru7u7y8'
+'vPLVgfPXhvPYjvPalvTck/XekfXelPTbnvTdp/fhgffhgvfihffjh/jjivjk'
+'jPjkjvXgn/jlkfjmlfbhrPnpofnppPrsrPrsr/rts/vvucXFxcbGxsjIyMrK'
+'ysvLy8/Pz9jSxtrUyN7Yy9DQ0NHR0dXV1dbW1tfX19nZ2dvb2tvb2+3m2Pjq'
+'wvvyyvzyyPrw3OLi4uTk5Ozs7O3t7e/v7/by5Pv26Pv36v345/356P356/36'
+'6/766P767/777/Dw8PHx8fLy8vT09Pb29v368f368v/89f789v789//99vj4'
+'+Pn5+fr6+vv7+//9+P7+/f/+/f/+/v///v///wAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAACH5BAEAAJsALAAAAAAQABAAAAjoADcJHDhwSJAf'
+'PXYQXLipjR5JlAy9wcFQoBBFiSAqUnSj4iYgiN7guTPmkA2PPiLF2aNGjaYa'
+'mxpY6EDzggMekdjsKVNGE41NDCxlwnQpkx8dkdbkCSNG04xNCx5ZYXJFSyEe'
+'k9zcIWNGk4xNChY1WXLEw4MCaNMWiLEpQZ8PESBIEBRITho0Z+AUEIggUyVI'
+'jQZ5sTKlRQgQfPZuOuCoDpguVahEcXGCBIc/ig0w+sIlixQoL1CU0IABkOIC'
+'hLZggcLCxIYMFShMoKOYgJ0nTpQkQbJCRYoRIuYQEBhgk4AByJMjPz4wIAA7')
return img
def saveproject():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAIcAAAAAACBarS5fpjppqj5qqzNoukRtq0RwsFd+u1F/w1N/'
+'wV2Ev1SBxFWDxliEw1uGx12IyGGKx2CJyWGLyWKLyGWNymWNzGiOzWyLzWiL'
+'026K122L2myN3GmQzW6TzG6X1HGY0HOZ0XOa1HWa0Hee2Xmc1Xue1Hie2Hif'
+'2Hmf23uc3XCP4Xmg2Xqg2Xuh3Hyj232j3n6l3YS/UYS/VJjJb5nJcZWVlZmZ'
+'mZ+fn6GhoaOjo6Wlpaenp6mpqaurq6ysrK2tra6urq+vr7CwsLGxsbKysrOz'
+'s7S0tLW1tba2tre3t7i4uLm5ubq6uru7u7y8vIKk1oCm3YWi3YWo3YGn4YGn'
+'4oyv5JK15pe26pq275m46qG/6qG/757A77PH47rO77DN88fuh8fujNf0otf2'
+'osDAwMLCwsPDw87Ozs/Pz9DQ0NHR0dLS0tTU1NXV1dbW1tjY2NnZ2dvb29zc'
+'3N3d3d7e3v//3eDg4OHh4eLi4uPj4+Xl5ebm5ufn5+jo6Orq6uvr6+3t7e7u'
+'7u/v7+Xx7ebx7ebw7+bx7+n05Ojz6urz8+318///4PDw8PLy8vPz8/T09PX1'
+'9fb29vf39/H4//f7//j4+Pr6+vv7+/r7/fr8/fv8/fv8/vz8/P39/fz9/v39'
+'/v7+/v///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAACH5BAMAAKMALAAAAAAQABAAAAjwAEcJbLJESZIj'
+'AhMqdKLGzyNPnQLdMQNE4agmeEKJ2sgRU5oeCplY0shxI6Y/PBQqmdRnkCRM'
+'lxrVOcNnh0IkkgDVeeNmzZknZfboUHgkkiA+efh8MAECyoUOI0JQGFUE0h88'
+'dPRYACPKS8ktEoY86kMnzp0HXUR52FiJkpUIQRzlgeNmDgQumjiB+pRp0xQF'
+'P2S2YSOngpYYUV60YIHixIIedR7JITzhShUqMFykICECwagdaAJBisQAi6JC'
+'hwwRWlTigMAcOHL4SJAlkQwaNWYgUjHA4qgGX+yEGUNGDCMpBHwXcJBhBYcN'
+'GjAYEBAQADs=')
return img
def zoomin():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAPYAADpuKi9wKjFzLDN1LjV4Lzd7MT5/PZNbEZdfGJpiG51l'
+'H51nIqVtK6hwMLF5PECGOUaOP1OTRFCaSFaiTl+tVmKgUWmnWHCpVGGwWGOz'
+'WWW1W3uwXmKoYW67aX6yYHi1ZHC9abyNVYS2ZYy6bJS/c8WXZMeZZsyidc6k'
+'dprDeJzEeXaczXug0H+l04qTot21iafKguTBnejLq4Wp1oqu2Zy635a44Z/A'
+'56LD6K7J6KjI7KvL7rfQ7a7O8K/P8bTT9LvW873V8LjW9rnX973Y87vZ+b3b'
+'+r7b+r/c+sDa9MHa9MLb9MPc9MTc9MXd9cbd9cfe9cjd88je9cjf9cnf9crg'
+'9cvh9s3h9czh9s7i9szi+M3i+dDj9tHl99Lk99Pl99bn99fn99no+Nrp+N3q'
+'+d7r+d/s+d7s+uHt+eLt+eLu+ePu+gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAGwA'
+'LAAAAAAQABAAAAeOgGyCg4SFhEhDQkI9OoaDR1tra2pjRDeOkGthWJxkIBoU'
+'hUNkYVZWU09YGTAThT9rWKhLHRgfKRYQD4M9ZrJLFCokIyIbBYM6m0tLRBwS'
+'FR4RBAGDODxiU0RES14PFwOGNjlYXl5RNQYCAI40My0sKy4nMigNjoUOJTEm'
+'CvaEDCEvEPQjlGDBgYEIE7IJBAA7')
return img
def zoomout():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAPYAAOoRHOwaI+4mLvE1OpNbEZdfGJpiG51lH51nIqVtK6hw'
+'MLF5PLV9QfRFSPdUVvpjYvxvbfNxc7yNVcWXZMeZZvmAfsyidc6kdnaczXug'
+'0H+l05KUnYqTouiMguuTh+2bjdWuht21ifChkfKnleTBnejLq5Gox4Wp1oqu'
+'2Yyw25O235y6352735a44Z/A56LD6K7J6KjI7KvL7rXP7LfQ7a7O8K/P8bTT'
+'9LvW873V8LjW9rnX973Y87vZ+b3b+r7b+r/c+sDa9MHa9MLb9MPc9MTc9MXd'
+'9cbd9cfe9cjd88je9cjf9cnf9crg9cvh9s3h9czh9s7i9szi+M3i+dDi9dDj'
+'9tHl99Lk99Pl99bn99fn99no+Nrp+N3q+d7r+d/s+d7s+uHt+eLt+eLu+ePu'
+'+gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAGUA'
+'LAAAAAAQABAAAAeMgGWCg4SFhEA9Ojc2MoaDP1NfY2FcPC6OkGRZUlJWXTMq'
+'hjtdWk5OUkZOVCmGN2RQS0dLERAPDg0DAoM1X7FDQxAjIh8eHQGDMVpQvzwR'
+'t7kBAIMvNFtLPDhDVywbji0wUFdXSSsmIAyOKCcnGRgcFiUXCo6FCxMkEwf0'
+'hAkSIQb7CBlAQCCgwYNlAgEAOw==')
return img
def undo():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAIcAAAAAACZmIidmIydnIylpJSpqJSprJitrJixtKC5wKi9x'
+'KzJ1LTN1LjZ6MTl9Mzp+NDt/NTyBNj2CNz6DNz+FOECFOUCGOkGHOkOKPEWN'
+'PkePQEmRQkqSQkuUQ0yURE6XRk6RSU+RSlCZR1GcSVOeS1SfS1qdVFahTVai'
+'TVejTlijT1mmUF2gWV6hWFuoUl2qU1+sVWKkWmSlXWCuVmWoX2KxWGSzWWW1'
+'W2e2XGi4XWq6X2eoYWmrY2urZGqsYmutZG6wZm+xZ2u8YG2+Ym6/YnCxaHWz'
+'bnS0bHS7bXe9cHi5cXu4c3q6dHu7dXy8dny+dn2/d3DBZHDCZHLEZn/BeYG+'
+'eYDBeYHCe4LCe4LCfITDfoXEfYbEf4fFgIjDgYjGgorGg4vHhYzFg43IhY3I'
+'hpDIiJDKiZLKipLLi5XMjZXNjJnPkZ3QlJ/Sl6LUmaXVnKfWnqjXn6rYoavY'
+'oqzYoq3Zo67apa/aprDbpgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAACH5BAEAAHkALAAAAAAQABAAAAihAPMIHEiwoMGD'
+'BqdIISIkx40aMFyoMBgFz506c+K8YSPmiIiCQ+zQkQPnjZs2atYs0UBQBw4b'
+'M16sOFGkTBo0MiggFPiBg5ctRiDsFJihB5cqDYYK3IHFyYKCJApGaJKERwKC'
+'QX500IChggkmVKCEIAjkjBkyYLpouYLkCQsDA0f4GBPmC5csVpTQAFGg4IYY'
+'LSY8aMBAAQKEFyQoJehAaUAAOw==')
return img
def prefs():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAOdZAAAAAAEBAQICAgMDAwQEBAUFBQYGBgcHBwgICAkJCQoK'
+'CgsLCwwMDA0NDQ4ODg8PDxAQEBERERISEhMTExQUFBUVFRYWFhcXFxgYGBkZ'
+'GRoaGhsbGxwcHB0dHR4eHh8fHyAgICEhISIiIiMjIyQkJCUlJSYmJicnJygo'
+'KCkpKSoqKisrKywsLC0tLS4uLi8vLzAwMDExMTIyMjMzMzQ0NDU1NTY2Njc3'
+'Nzg4ODk5OTo6Ojs7Ozw8PD09PT4+Pj8/P0BAQEFBQUJCQkNDQ0REREVFRUZG'
+'RkdHR0hISElJSUpKSktLS0xMTE1NTU5OTk9PT1BQUFFRUVJSUlNTU1RUVFVV'
+'VVZWVldXV1hYWFlZWVpaWltbW1xcXF1dXV5eXl9fX2BgYGFhYWJiYmNjY2Rk'
+'ZGVlZWZmZmdnZ2hoaGlpaWpqamtra2xsbG1tbW5ubm9vb3BwcHFxcXJycnNz'
+'c3R0dHV1dXZ2dnd3d3h4eHl5eXp6ent7e3x8fH19fX5+fn9/f4CAgIGBgYKC'
+'goODg4SEhIWFhYaGhoeHh4iIiImJiYqKiouLi4yMjI2NjY6Ojo+Pj5CQkJGR'
+'kZKSkpOTk5SUlJWVlZaWlpeXl5iYmJmZmZqampubm5ycnJ2dnZ6enp+fn6Cg'
+'oKGhoaKioqOjo6SkpKWlpaampqenp6ioqKmpqaqqqqurq6ysrK2tra6urq+v'
+'r7CwsLGxsbKysrOzs7S0tLW1tba2tre3t7i4uLm5ubq6uru7u7y8vL29vb6+'
+'vr+/v8DAwMHBwcLCwsPDw8TExMXFxcbGxsfHx8jIyMnJycrKysvLy8zMzM3N'
+'zc7Ozs/Pz9DQ0NHR0dLS0tPT09TU1NXV1dbW1tfX19jY2NnZ2dra2tvb29zc'
+'3N3d3d7e3t/f3+Dg4OHh4eLi4uPj4+Tk5OXl5ebm5ufn5+jo6Onp6erq6uvr'
+'6+zs7O3t7e7u7u/v7/Dw8PHx8fLy8vPz8/T09PX19fb29vf39/j4+Pn5+fr6'
+'+vv7+/z8/P39/f7+/v///yH+EUNyZWF0ZWQgd2l0aCBHSU1QACH5BAEKAP8A'
+'LAAAAAAQABAAAAjSAP8JHPjvGDGCCAkeK0eOVUKB0Zwp+yfMWzdZ/1idGkWQ'
+'W7ttwnxZowaL1bJztwgS40Yt27aR2aQtQ5aHYDFqzJLp4hRKmLFgtxIJREYM'
+'GLZmvAQJfDRMWDNGhwYRS2ctmbNSBFkR02Ws25pf4qQZaxaLICpht4BJCzPq'
+'Eahnxph5WhqMVy4yWKYIZFUsWLFjqk4F81VLVRqCpqoVWxYt2TFmxXotc0OQ'
+'lzdTgwoRE9bGjSlpgAgGekPm36Blx8T8w+IkycN/gXrJ2vIaIZYoDwMCADs=')
return img
def windowprefs():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAPZnAD8/P0ZGRklJSU5OTlJSUlZWVlhYWF5eXldjeWhoaGxs'
+'bHNzc3h4eHt7e39/f2+xZnCxZ/iia/Oqe/WsfdTAbmd1i216j1FxqV6Bt2eH'
+'t2qGsWCFumKFumOHvWSIvWaLv2aLwGmOwW+QwWySx2yUyW+Wy3qaxHCUyHWZ'
+'yHCZznKaz3qezHOd0nWe03We1Hag1Xmi2Hmj2Xqk2nqk23qm3Hun3ICAgIaG'
+'hoeHh4iIiIqKiouLi4yMjI2NjY+Pj5qTjpSUlJmZmZ2dnaWlpaampqqoqKur'
+'q62tra6urrS0tLW1tba2tru7u7y8vL29vb6+vr+/v5/Sl4Ws3Y2x3ZK56MLC'
+'wsTExMfHx8vLy9DQ0NHR0dPT09jY2Nzc3N3d3d7e3uTk5PX19fv7+/z8/P39'
+'/f7+/v///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAGcA'
+'LAAAAAAQABAAAAe4gGeCg4SFglM0NDMwLy4sKiolJSaCUlSXmJmYImc1np8z'
+'M5+eNCBnM2apqmZiq2YdZzGuqmJhUKkbsbNmXVw9OakYZy9mD8ZmEkQ9SUdH'
+'FD0VZyzFUQ9jET9YSVpaR1gKZyvFxmMSRVhECw1HVhZnKK47UGANXkE4XkYK'
+'I64NRl45rADJoaXHAH6umFRJggPHESYIzlz48KHDhgwaGkARUgVKECgGDAk6'
+'QOAMEBtnBAAQSahAAEKBAAA7')
return img
def tilevertical():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAPZrAEBZl0Bal0FbmEVenUZin0pnoUhlo0tnpUtpo01tpU1q'
+'qE1qqlJxqFFwrlBxr1N0sVV2s1d5tll8sVl9uFp+uV2CvWGGumSFu2eLv2WL'
+'xWySxGuVzG6XznKUwXmax22d2G+f2XGc0XWe1HWf13mgz36j0nih1n+n1XGg'
+'2XOh2nSi2naj23ii2Xil232l2Xqm3Hun3Hym3H+o232o3X+q3Yiny4Op1oWt'
+'2YGr3oSt34Wu34av34mv24ux3Y2y35C035W234iw4Imx4Iuy4Iyz4Y6y4I61'
+'4Y+14omz5oq05oy1542254+355K24ZG14pC24pK34pO445S545W645++5JG4'
+'6JK56JS66JW76Ze86Zi96Z3A6p7B66XC5KDC66HD66PE7KTE7KXF7KbG7KfG'
+'7KjH7fv8/v39/v7+/v7+//7//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAGwA'
+'LAAAAAAQABAAAAeUgGxsXVNSUD8+PDc2NjWCglRlZWKUYV6XXFwej1FRUFBG'
+'RkQ+QkI5ORqPUGusra5rGI8+r65maxaPJbSvDI8ku64JjxJAOTgyLiYhIRwb'
+'HQiPRFpZV1dVVUxMSkhIF484M+EvLSspKCgfHxOPMsCtEI8s7qwOjyPzawuP'
+'GfgCjwUVJkR44GDBAQMEBgAA8Kihw4eBAAA7')
return img
def tilehorizontal():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAPZTAEBZl0Bal0Jbl0NdmUNemkVgnEVgnUZinUlln0pnpEtp'
+'ok9spk5tpk5rqFJwrVFyslZ3sVR2tVt/tld8uV+EuF+DvGGEu16EwWKIwGeM'
+'wGOJxWeQyGuRyG6Yz3OUw3GXy3Wdz3efznOd03yi0Xih1nyk1X6m2n6p3YSi'
+'yZCtz4Gp1Iar1oKp2Yat24Cr3YKq3IKs3oat34Wu34ev34iv2Iyx3omx4Iqy'
+'4Iqy4Yyz4Y204Y+04I614pC24pG34pK24pO445S545W545e44pW645+94p++'
+'5Ji96Zq+6py/6p3A6p/B66XC5KHD66TE7KXF7KfG7KjH7anI7f///wAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAFQA'
+'LAAAAAAQABAAAAeEgFSCg4SFgkxENCsqKRZFOjYjISiCRlGXUU9OQ01LS0pH'
+'Rx5URERAPjo6OzY2MTEwMCcZVD5Ttrcxt7cWVDu6ti+/UxJUNcImwhBULcIk'
+'wg5ULMIiwg1UJcIdwglUIMIbwgZUH8IawgNUHMIXwgBUDBYWDAoIBgYTEQ8D'
+'AvyG/v8ABwUCADs=')
return img
def detachframe():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAPZMAFFxqVFyqVNzqVR1q1R1rFZ2rVd5rlh6sFh7sVp8slx+'
+'tV6Bt1+DuWCFumGFu2OHvWSIvmaLwGiOw2+QwWySyGyUyW+Wy2+XzHqaxHCZ'
+'znKaz3Oc0XOd0nWe1Hag1Xmi2Hmj2Xqk2nqk23qm3Hun3IWs3Yat3Y2x3ZCz'
+'3qe305K56N7n8uTq8+fr9ejs9urv9urw9+vy9+3z9+zy+O/z+u70+PD1+fL1'
+'+/H2+/P3+/T3+/T4+/X5+/f5+/b6+/f6+/b7+/f8/Pn7+/j7/Pn7/Pn8/Pj9'
+'/fr8/Pr9/fv9/fz+/v3//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAE0A'
+'LAAAAAAQABAAAAefgE0nIyEfHRwaGhYWGE2OTSUqkpOUKhOPJCMjmSSZIZsj'
+'EY8hTKWmp0wPo0xBKYOFHRuJi40fTEmRlZUTHredoL+/oRultqioDxfFTEND'
+'NjYyLy0tK0wMFMtJQ0A7NjUxMC1MCwESEL1JSUE77DY0MUwKjxqm6Uk79ztM'
+'B4/Kt/b2gDAh8AjbsVMBHgkw94ABAgQGCgwQECDho4sYHwUCADs=')
return img
def closeframe():
img = tk.PhotoImage(format='gif',data=
'R0lGODlhEAAQAPZuALlEA7lFA7xCBLxDBb1NBK5LFsNXGt1kAeFoA+52AOp5'
+'Bfh2AO9/GP93Ef9/GMFaIMJdJMlkJcZhKsprLtJ2PeZ3IYZhX/+GIeuKQP+S'
+'TfSdY/WoclFxqVh7sVp8slx+tFx+tV6Bt1+DuWCFumGFu2OHvWSIvmaLwGiO'
+'w2+QwWySyGyUyW6Uym+Wy2+XzHqaxHCZznKaz3Oc0XOd0nWe1Hag1Xmi2Hmj'
+'2Xqk2nqk23qm3Hun3N+uk/a1jfe1jeGwlf7Nrv7WuIWs3Y2x3ZK56N7n8uTq'
+'8+fr9ejs9urv9urw9+vy9+3z9+zy+O/z+u70+PD1+fL1+/H2+/L3+vL3+/P3'
+'+/T3+/T4+/X4+/X5+/f5+/b6+/f7/Pn7+/j7/Pn7/Pj8/Pn8/Pj9/fn9/fr8'
+'/Pr9/fv9/fv+/vz9/fz+/v3+/vz///3///7//////wAAAAAAAAAAAAAAAAAA'
+'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAG8A'
+'LAAAAAAQABAAAAexgG+Cg4SFgkM6Ojk3NTQzMTEtKy+CQkSXmJmYKW87np87'
+'iaA6J285bqipqqklbzduXl5aVldPTEpGRkVuI65uZV2yWldSTU1KSG4hbzWo'
+'XWVlw1Kzxm4gbzSoZb/QwlJSbh5vM9zQ5l48EgMPFm8uv9rmPxQ9QRsTbyup'
+'2/wQPRkNLmAAoGJVqgFAUjEYwAHFCRMjRIDw4KGABgcLEiAAYGiQgQgVFBwg'
+'0LEQgAEc3wQCADs=')
return img
|
|
from pyglet.sprite import Sprite
from pyglet.text import Label
from pyglet import gl
from pyglet import graphics
from functions import *
class UI:
def __init__(self, window):
self.window = window
self.buttons = []
self.bars = []
self.progressbars = []
self.combat_text = []
self.auto_target_rings = []
self.target_rings = []
self.target_label = None
self.stats = None
self.settings = dict(
draw_mob_hp=True,
allways_draw_mob_hp=False,
redraw_time=0.5
)
self.redraw_timer = self.settings["redraw_time"]
self.bg_batch = self.window.batches["gui1"]
self.fg_batch = self.window.batches["gui2"]
self.bar_fg_batch = self.window.batches["gui3"]
self.behind_batch = self.window.batches["gui0"]
def add_button(
self, x, y, text="Default",
cb=None, cb_arg=None
):
button = Button(
self.window, x=x, y=y, text=text, callback=cb, callback_arg=cb_arg,
bg_batch=self.bg_batch, fg_batch=self.fg_batch
)
self.buttons.append(button)
def add_bar(
self, x, y,
text="Default", width=200, height=30, color="blue", shows="default"
):
bar = Bar(
self.window,
x=x, y=y,
text=text, w=width, h=height, c=color, s=shows,
bg_batch=self.bg_batch, fg_batch=self.bar_fg_batch
)
self.bars.append(bar)
def add_progressbar(
self, x, y, duration, w=64, h=10, title=None,
c="blue", bgc="dblue", tc="black"
):
b = ProgressBar(
self.window, x=x, y=y, w=w, h=h, c=c, bgc=bgc, tc=tc,
duration=duration,
fg_batch=self.fg_batch, title=title
)
self.progressbars.append(b)
def add_stats(self, owner, x, y, width, height):
self.stats = Stats(
self.window, owner, x=x, y=y, w=width, h=height,
bg_batch=self.bg_batch, fg_batch=self.fg_batch,
)
def add_combat_text(self, text, x, y, **kwargs):
ct = FloatingCombatText(
self, text, x, y, batch=self.fg_batch, **kwargs
)
self.combat_text.append(ct)
def update_bar(self, bartype, value, maxvalue):
for b in self.bars:
if b.type == bartype:
b.update(value, maxvalue)
def update_stats(self):
if self.stats and self.window.debug:
self.stats.update()
def check(self, x, y, press=True, dry=False):
if dry:
for b in self.bars:
if b.check(x, y):
return True
for b in self.buttons:
if b.check(x, y):
return True
if self.stats:
if self.stats.check(x, y):
return True
else:
for b in self.bars:
if b.check(x, y):
return True
for b in self.buttons:
if press:
if b.check(x, y):
b.press()
return True
else:
if b.pressed:
if b.check(x, y):
b.release()
return True
else:
b.release(do_action=False)
if self.stats:
if self.stats.check(x, y):
return True
return False
def update(self, dt):
if self.redraw_timer >= self.settings["redraw_time"]:
if self.window.game.player:
p = self.window.game.player
self.update_bar("hp", int(p.hp), p.max_hp)
self.update_bar("mp", int(p.mp), p.max_mp)
self.update_bar("sta", int(p.sta), p.max_sta)
self.update_stats()
self.redraw_timer = 0
else:
self.redraw_timer += dt
if self.stats:
if self.window.debug and self.stats.hidden:
self.stats.toggle_hide(False)
elif not self.window.debug and not self.stats.hidden:
self.stats.toggle_hide(True)
p = self.window.game.player
if p:
if p.cast_object:
if not self.progressbars:
barpos = self.window.get_windowpos(p.x, p.y + 24)
fg_c, bg_c, tc = get_color_scheme_by_type(
p.cast_object.ability.ability_attr["magic_type"]
)
self.add_progressbar(
*barpos, p.cast_object.time,
w=40, h=6, c=fg_c, bgc=bg_c, tc=tc,
title=p.cast_object.ability.get_name()
)
else:
for b in self.progressbars:
b.update(p.cast_object.timer)
else:
for b in self.progressbars:
b.label.delete()
if hasattr(b, "title"):
b.title.delete()
self.progressbars = []
t = p.target
if t:
if not self.target_rings:
tr = TargetRing(
self.window, t.x, t.y,
w=t.sprite.width, h=t.sprite.height,
batch=self.behind_batch
)
self.target_rings.append(tr)
else:
for r in self.target_rings:
r.update(t.x, t.y)
if not self.target_label:
self.target_label = Label(
text=t.name, font_name=None, font_size=12,
x=self.window.width // 2, y=100,
anchor_x="center", anchor_y="center",
color=(0, 0, 0, 255)
)
else:
self.target_label.text = t.name
else:
if self.target_rings:
for r in self.target_rings:
r.sprite.delete()
self.target_rings = []
if self.target_label:
self.target_label = None
else:
for b in self.progressbars:
b.label.delete()
if hasattr(b, "title"):
b.title.delete()
self.progressbars = []
for ct in self.combat_text:
ct.update(dt)
def draw(self):
if self.settings["draw_mob_hp"]:
gl.glColor4f(*lookup_color("red", gl=True))
gl.glLineWidth(3)
for e in self.window.game.enemies:
if e.hp < e.max_hp or self.settings["allways_draw_mob_hp"]:
wpos = self.window.get_windowpos(
e.x, e.y + e.sprite.height,
check=True, tol=32, precise=True
)
if wpos:
width = (e.hp / e.max_hp) * e.sprite.width
graphics.draw(
2,
gl.GL_LINES,
(
'v2f',
(
wpos[0] - width / 2,
wpos[1],
wpos[0] + width / 2,
wpos[1])
)
)
# for b in self.buttons:
# b.draw()
self.bg_batch.draw()
if self.window.game.player:
for b in self.bars:
b.draw()
self.bar_fg_batch.draw()
if self.stats:
self.stats.draw()
for b in self.progressbars:
b.draw()
self.fg_batch.draw()
if self.target_label:
self.target_label.draw()
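# Illustrative sketch only (not part of the original module): one way a game
# window might wire this UI up.  The `window` object, its batches/textures and
# the `toggle_menu` callback are assumptions inferred from the attribute
# accesses above, not guaranteed by this module.
def _example_ui_setup(window):
    ui = UI(window)
    ui.add_button(100, 40, text="Menu", cb=window.toggle_menu)  # hypothetical callback
    ui.add_bar(150, 560, text="HP", color="red", shows="hp")
    ui.add_bar(150, 530, text="MP", color="blue", shows="mp")
    ui.add_bar(150, 500, text="STA", color="yellow", shows="sta")
    return ui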
class TargetRing:
def __init__(self, window, x, y, w=32, h=32, batch=None):
self.window = window
self.x, self.y = window.get_windowpos(x, y, precise=True)
self.gamepos = (x, y)
img = window.textures["redcircle"]
scale = w / img.width
self.sprite = Sprite(
img,
x=self.x, y=self.y,
batch=batch
)
self.sprite.scale = scale
def update(self, x, y):
w = self.window
self.sprite.x, self.sprite.y = self.x, self.y = w.get_windowpos(
x, y, precise=True
)
class Button:
def __init__(
self, window, x=20, y=20, text="Default",
callback=None, callback_arg=None,
bg_batch=None, fg_batch=None
):
self.window = window
self.img = window.textures["button"]
self.img_down = window.textures["button_down"]
self.sprite = Sprite(self.img, x=x, y=y, batch=bg_batch)
self.label = Label(
text=text, font_name=None, font_size=10,
x=x, y=y, anchor_x="center", anchor_y="center",
color=(0, 0, 0, 255), batch=fg_batch
)
self.width, self.height = self.img.width, self.img.height
self.x, self.y = x, y
self.pressed = False
self.callback = callback
self.callback_arg = callback_arg
def set_pos(self, x, y):
self.sprite.x, self.sprite.y = x, y
self.label.x, self.label.y = x, y
self.x, self.y = x, y
def press(self):
self.pressed = True
self.sprite.image = self.img_down
    def release(self, do_action=True):
        self.pressed = False
        self.sprite.image = self.img
        # Guard against buttons created without a callback (cb=None).
        if do_action and self.callback:
            if self.callback_arg:
                self.callback(self.callback_arg)
            else:
                self.callback()
def check(self, x, y):
if (
x >= self.x - self.width / 2 and
x < self.x + self.width / 2 and
y >= self.y - self.height / 2 and
y < self.y + self.height / 2
):
return True
else:
return False
def draw(self):
pass
class Bar:
def __init__(
self, window, x=20, y=20,
text="Default", w=100, h=20, c="blue", s="default",
bg_batch=None, fg_batch=None
):
self.window = window
self.color = lookup_color(c, gl=True)
self.label = Label(
text=text, font_name=None, font_size=9,
x=x, y=y, anchor_x="center", anchor_y="center",
color=(255, 255, 255, 255), batch=fg_batch
)
self.width, self.height = w, h
self.x, self.y = x, y
self.value = 100
self.type = s
self.value_max = 100
self.update(self.value, self.value_max)
    def set_pos(self, x, y):
        # Bar has no sprite (it draws a plain rectangle), so only the label
        # and the stored position move; the rectangle itself is rebuilt the
        # next time the value changes in update().
        self.label.x, self.label.y = x, y
        self.x, self.y = x, y
def check(self, x, y):
if (
x >= self.x - self.width / 2 and
x < self.x + self.width / 2 and
y >= self.y - self.height / 2 and
y < self.y + self.height / 2
):
return True
else:
return False
def update(self, value, maxvalue):
self.value_max = maxvalue
if not self.value == value:
self.value = value
w = self.width * (self.value / self.value_max)
self.rectangle = create_rectangle(
self.x - self.width / 2, self.y + self.height / 2,
w, self.height,
centered=False
)
self.label.x = int((self.x - self.width / 2) + w / 2)
self.label.text = str(value)
def draw(self):
try:
gl.glEnable(gl.GL_BLEND)
gl.glColor4f(*self.color)
graphics.draw(4, gl.GL_QUADS, ('v2f', self.rectangle))
gl.glDisable(gl.GL_BLEND)
# self.label.draw()
except AttributeError as e:
self.window.logger.debug(
"Bar is not ready to be drawn: {0}".format(e)
)
class FloatingCombatText:
def __init__(
self, ui, text, x, y, duration=1.,
scale=1., second_scale=0.5, growth=0.2, velocity=75,
color="darkred", batch=None
):
self.start_scale = scale
self.second_scale = second_scale
self.growth = growth
self.x, self.y = x, y
self.ui = ui
wx, wy = self.ui.window.get_windowpos(x, y)
self.color = lookup_color(color)
self.label = Label(
text=str(text), font_name=None, font_size=12 * scale,
x=wx, y=wy, anchor_x="center", anchor_y="center",
color=self.color, batch=batch,
)
self.velocity = velocity
self.timer = duration
self.duration = duration
self.done = False
def on_end(self):
self.label.delete()
self.done = True
def update(self, dt):
if self.timer <= 0:
self.on_end()
else:
self.timer -= dt
perc = self.timer / self.duration
scale = (
self.second_scale + (
(self.start_scale - self.second_scale) * perc
)
)
self.y += self.velocity * dt
self.label.font_size = 9 * scale
self.label.x, self.label.y = self.ui.window.get_windowpos(
self.x, self.y, precise=True
)
opacity = int(255 * perc)
if opacity < 0:
opacity = 0
self.color = self.color[0], self.color[1], self.color[2], opacity
self.label.color = self.color
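# Worked example of the interpolation used in update() above (a sketch, not
# called by the module): with start_scale=1.0, second_scale=0.5 and 25% of the
# duration remaining (perc=0.25), the scale is 0.5 + (1.0 - 0.5) * 0.25 == 0.625
# and the opacity is int(255 * 0.25) == 63, so the text shrinks and fades together.
def _combat_text_scale(start_scale, second_scale, perc):
    return second_scale + (start_scale - second_scale) * perc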
class ProgressBar:
def __init__(
self, window, x=20, y=20, duration=1.,
text="Default", w=64, h=20, c="blue", bgc="dblue", tc="black",
bg_batch=None, fg_batch=None, title=None
):
self.window = window
self.text_color = lookup_color(tc)
self.color = lookup_color(c, gl=True)
self.bg_color = lookup_color(bgc, gl=True)
self.width, self.height = w, h
self.x, self.y = x, y
self.value = 0
self.value_max = duration
self.update(self.value)
self.bg_rectangle = create_rectangle(
x - w / 2, y + h / 2,
w, h,
centered=False
)
self.label = Label(
text=str(self.value_max), font_name=None, font_size=7,
x=x, y=y, anchor_x="center", anchor_y="center",
color=self.text_color, batch=fg_batch
)
if title:
self.title = Label(
text=title, font_name=None, font_size=7,
x=x, y=y + 10, anchor_x="center", anchor_y="center",
color=(255, 255, 255, 255), batch=fg_batch
)
def update(self, value):
if not self.value == value:
self.value = value
w = self.width * (self.value / self.value_max)
self.rectangle = create_rectangle(
self.x - self.width / 2, self.y + self.height / 2,
w, self.height,
centered=False
)
self.label.text = str(round(self.value_max - self.value, 1))
def draw(self):
try:
gl.glEnable(gl.GL_BLEND)
gl.glColor4f(*self.bg_color)
graphics.draw(4, gl.GL_QUADS, ('v2f', self.bg_rectangle))
gl.glColor4f(*self.color)
graphics.draw(4, gl.GL_QUADS, ('v2f', self.rectangle))
gl.glDisable(gl.GL_BLEND)
# self.label.draw()
except AttributeError as e:
self.window.logger.debug(
"Bar is not ready to be drawn: {0}".format(e)
)
class Stats:
def __init__(
self, window, owner, x=50, y=200, w=180, h=300,
bg_batch=None, fg_batch=None, hidden=True
):
        self.x, self.y, self.width, self.height = x, y, w, h
        self.bg_batch, self.fg_batch = bg_batch, fg_batch
        self.owner, self.window = owner, window
        self.color = lookup_color("grey", gl=True)
        self.hidden = hidden
        # Always create the label lists so update() and toggle_hide() are
        # safe to call before the panel has been built for the first time.
        self.stat_labels_l = []
        self.stat_labels_r = []
        self.build()
def check(self, x, y):
if (
x >= self.x - self.width / 2 and
x < self.x + self.width / 2 and
y >= self.y - self.height / 2 and
y < self.y + self.height / 2
):
return True
else:
return False
def build(self):
if not self.hidden and self.window.debug:
batch = self.fg_batch
self.stat_labels_l = []
self.stat_labels_r = []
y = self.y
yo = 0
x = self.x
mainstat = self.owner.mainstat
if mainstat == "str":
c = lookup_color("darkred")
elif mainstat == "agi":
c = lookup_color("darkgreen")
elif mainstat == "int":
c = lookup_color("darkblue")
else:
c = lookup_color("yellow")
label_l = Label(
text="Main stat:", font_name=None, font_size=10,
x=x, y=y-yo, anchor_x="left", anchor_y="top",
color=lookup_color("black"), batch=batch
)
label_r = Label(
text=mainstat, font_name=None, font_size=10,
x=x+self.width, y=y-yo,
anchor_x="right", anchor_y="top",
color=c, batch=batch
)
self.stat_labels_l.append(label_l)
self.stat_labels_l.append(label_r)
yo += 16
for stat, value in self.owner.base_stats.items():
modvalue = self.owner.stats.get(stat)
label_l = Label(
text=str(stat), font_name=None, font_size=8,
x=x, y=y-yo, anchor_x="left", anchor_y="top",
color=lookup_color("black", opacity=255),
batch=batch
)
label_r = Label(
text=str(modvalue), font_name=None, font_size=8,
x=x+self.width, y=y-yo,
anchor_x="right", anchor_y="top",
color=lookup_color("darkblue", opacity=255),
batch=batch
)
label_r.identifier = str(stat)
self.stat_labels_l.append(label_l)
self.stat_labels_r.append(label_r)
yo += 12
self.height = yo
self.rectangle = create_rectangle(
self.x, self.y,
self.width, self.height,
centered=False
)
def toggle_hide(self, hidden):
print("Hidden: {0}".format(hidden))
if hidden:
self.hidden = True
for l in self.stat_labels_l:
l.delete()
for l in self.stat_labels_r:
l.delete()
self.stat_labels_l = []
self.stat_labels_r = []
else:
self.hidden = False
self.build()
    def update(self):
        for bs in self.owner.stats.get_all_stats():
            for l in self.stat_labels_r:
                if l.identifier == bs:
                    l.text = str(round(self.owner.stats.get(bs), 1))
                    break
def draw(self):
if self.window.debug and not self.hidden:
gl.glColor4f(*self.color)
graphics.draw(4, gl.GL_QUADS, ('v2f', self.rectangle))
if not self.fg_batch:
for l in self.stat_labels_l:
l.draw()
for l in self.stat_labels_r:
l.draw()
|
|
"""SCons.Subst
SCons string substitution.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Subst.py 4369 2009/09/19 15:58:29 scons"
import re
import string
import types
import UserList
import UserString
import SCons.Errors
import SCons.Util
from SCons.Util import is_String, is_Sequence
# Indexed by the SUBST_* constants below.
_strconv = [SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_signature]
AllowableExceptions = (IndexError, NameError)
def SetAllowableExceptions(*excepts):
global AllowableExceptions
AllowableExceptions = filter(None, excepts)
def raise_exception(exception, target, s):
name = exception.__class__.__name__
msg = "%s `%s' trying to evaluate `%s'" % (name, exception, s)
if target:
raise SCons.Errors.BuildError, (target[0], msg)
else:
raise SCons.Errors.UserError, msg
class Literal:
"""A wrapper for a string. If you use this object wrapped
around a string, then it will be interpreted as literal.
When passed to the command interpreter, all special
characters will be escaped."""
def __init__(self, lstr):
self.lstr = lstr
def __str__(self):
return self.lstr
def escape(self, escape_func):
return escape_func(self.lstr)
def for_signature(self):
return self.lstr
def is_literal(self):
return 1
class SpecialAttrWrapper:
"""This is a wrapper for what we call a 'Node special attribute.'
This is any of the attributes of a Node that we can reference from
Environment variable substitution, such as $TARGET.abspath or
$SOURCES[1].filebase. We implement the same methods as Literal
so we can handle special characters, plus a for_signature method,
such that we can return some canonical string during signature
calculation to avoid unnecessary rebuilds."""
def __init__(self, lstr, for_signature=None):
"""The for_signature parameter, if supplied, will be the
canonical string we return from for_signature(). Else
we will simply return lstr."""
self.lstr = lstr
if for_signature:
self.forsig = for_signature
else:
self.forsig = lstr
def __str__(self):
return self.lstr
def escape(self, escape_func):
return escape_func(self.lstr)
def for_signature(self):
return self.forsig
def is_literal(self):
return 1
def quote_spaces(arg):
"""Generic function for putting double quotes around any string that
has white space in it."""
if ' ' in arg or '\t' in arg:
return '"%s"' % arg
else:
return str(arg)
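# A minimal sketch (not part of the original module) of how quote_spaces()
# behaves: arguments containing whitespace are wrapped in double quotes,
# everything else passes through unchanged.
def _example_quote_spaces():
    assert quote_spaces('myfile.txt') == 'myfile.txt'
    assert quote_spaces('my file.txt') == '"my file.txt"'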
class CmdStringHolder(UserString.UserString):
"""This is a special class used to hold strings generated by
scons_subst() and scons_subst_list(). It defines a special method
escape(). When passed a function with an escape algorithm for a
particular platform, it will return the contained string with the
proper escape sequences inserted.
"""
def __init__(self, cmd, literal=None):
UserString.UserString.__init__(self, cmd)
self.literal = literal
def is_literal(self):
return self.literal
def escape(self, escape_func, quote_func=quote_spaces):
"""Escape the string with the supplied function. The
function is expected to take an arbitrary string, then
return it with all special characters escaped and ready
for passing to the command interpreter.
After calling this function, the next call to str() will
return the escaped string.
"""
if self.is_literal():
return escape_func(self.data)
elif ' ' in self.data or '\t' in self.data:
return quote_func(self.data)
else:
return self.data
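# Illustrative sketch (not part of the original module): a non-literal holder
# whose contents include whitespace is routed through the quote function,
# while a literal holder goes straight to the supplied escape function.
def _example_cmdstringholder():
    plain = CmdStringHolder('my file.txt')
    assert plain.escape(lambda s: '\\' + s) == '"my file.txt"'
    lit = CmdStringHolder('my file.txt', literal=1)
    assert lit.escape(lambda s: '\\' + s) == '\\my file.txt'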
def escape_list(list, escape_func):
"""Escape a list of arguments by running the specified escape_func
on every object in the list that has an escape() method."""
def escape(obj, escape_func=escape_func):
try:
e = obj.escape
except AttributeError:
return obj
else:
return e(escape_func)
return map(escape, list)
class NLWrapper:
"""A wrapper class that delays turning a list of sources or targets
into a NodeList until it's needed. The specified function supplied
when the object is initialized is responsible for turning raw nodes
into proxies that implement the special attributes like .abspath,
.source, etc. This way, we avoid creating those proxies just
"in case" someone is going to use $TARGET or the like, and only
go through the trouble if we really have to.
In practice, this might be a wash performance-wise, but it's a little
cleaner conceptually...
"""
def __init__(self, list, func):
self.list = list
self.func = func
def _return_nodelist(self):
return self.nodelist
def _gen_nodelist(self):
list = self.list
if list is None:
list = []
elif not is_Sequence(list):
list = [list]
# The map(self.func) call is what actually turns
# a list into appropriate proxies.
self.nodelist = SCons.Util.NodeList(map(self.func, list))
self._create_nodelist = self._return_nodelist
return self.nodelist
_create_nodelist = _gen_nodelist
class Targets_or_Sources(UserList.UserList):
"""A class that implements $TARGETS or $SOURCES expansions by in turn
wrapping a NLWrapper. This class handles the different methods used
to access the list, calling the NLWrapper to create proxies on demand.
Note that we subclass UserList.UserList purely so that the
is_Sequence() function will identify an object of this class as
a list during variable expansion. We're not really using any
UserList.UserList methods in practice.
"""
def __init__(self, nl):
self.nl = nl
def __getattr__(self, attr):
nl = self.nl._create_nodelist()
return getattr(nl, attr)
def __getitem__(self, i):
nl = self.nl._create_nodelist()
return nl[i]
def __getslice__(self, i, j):
nl = self.nl._create_nodelist()
i = max(i, 0); j = max(j, 0)
return nl[i:j]
def __str__(self):
nl = self.nl._create_nodelist()
return str(nl)
def __repr__(self):
nl = self.nl._create_nodelist()
return repr(nl)
class Target_or_Source:
"""A class that implements $TARGET or $SOURCE expansions by in turn
wrapping a NLWrapper. This class handles the different methods used
to access an individual proxy Node, calling the NLWrapper to create
a proxy on demand.
"""
def __init__(self, nl):
self.nl = nl
def __getattr__(self, attr):
nl = self.nl._create_nodelist()
try:
nl0 = nl[0]
except IndexError:
# If there is nothing in the list, then we have no attributes to
# pass through, so raise AttributeError for everything.
raise AttributeError, "NodeList has no attribute: %s" % attr
return getattr(nl0, attr)
def __str__(self):
nl = self.nl._create_nodelist()
if nl:
return str(nl[0])
return ''
def __repr__(self):
nl = self.nl._create_nodelist()
if nl:
return repr(nl[0])
return ''
class NullNodeList(SCons.Util.NullSeq):
def __call__(self, *args, **kwargs): return ''
def __str__(self): return ''
# TODO(1.5): unneeded after new-style classes introduce iterators
def __getitem__(self, i):
raise IndexError
NullNodesList = NullNodeList()
def subst_dict(target, source):
"""Create a dictionary for substitution of special
construction variables.
This translates the following special arguments:
target - the target (object or array of objects),
used to generate the TARGET and TARGETS
construction variables
source - the source (object or array of objects),
used to generate the SOURCES and SOURCE
construction variables
"""
dict = {}
if target:
def get_tgt_subst_proxy(thing):
try:
subst_proxy = thing.get_subst_proxy()
except AttributeError:
subst_proxy = thing # probably a string, just return it
return subst_proxy
tnl = NLWrapper(target, get_tgt_subst_proxy)
dict['TARGETS'] = Targets_or_Sources(tnl)
dict['TARGET'] = Target_or_Source(tnl)
# This is a total cheat, but hopefully this dictionary goes
# away soon anyway. We just let these expand to $TARGETS
# because that's "good enough" for the use of ToolSurrogates
# (see test/ToolSurrogate.py) to generate documentation.
dict['CHANGED_TARGETS'] = '$TARGETS'
dict['UNCHANGED_TARGETS'] = '$TARGETS'
else:
dict['TARGETS'] = NullNodesList
dict['TARGET'] = NullNodesList
if source:
def get_src_subst_proxy(node):
try:
rfile = node.rfile
except AttributeError:
pass
else:
node = rfile()
try:
return node.get_subst_proxy()
except AttributeError:
return node # probably a String, just return it
snl = NLWrapper(source, get_src_subst_proxy)
dict['SOURCES'] = Targets_or_Sources(snl)
dict['SOURCE'] = Target_or_Source(snl)
# This is a total cheat, but hopefully this dictionary goes
# away soon anyway. We just let these expand to $TARGETS
# because that's "good enough" for the use of ToolSurrogates
# (see test/ToolSurrogate.py) to generate documentation.
dict['CHANGED_SOURCES'] = '$SOURCES'
dict['UNCHANGED_SOURCES'] = '$SOURCES'
else:
dict['SOURCES'] = NullNodesList
dict['SOURCE'] = NullNodesList
return dict
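# Illustrative sketch (not part of the original module): plain strings stand
# in for real Nodes here, since anything without a get_subst_proxy() method is
# passed through as-is by the proxy functions above.
def _example_subst_dict():
    d = subst_dict(target=['hello.o'], source=['hello.c', 'common.c'])
    assert str(d['TARGET']) == 'hello.o'
    assert str(d['SOURCE']) == 'hello.c'
    assert str(d['SOURCES'][1]) == 'common.c'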
# Constants for the "mode" parameter to scons_subst_list() and
# scons_subst(). SUBST_RAW gives the raw command line. SUBST_CMD
# gives a command line suitable for passing to a shell. SUBST_SIG
# gives a command line appropriate for calculating the signature
# of a command line...if this changes, we should rebuild.
SUBST_CMD = 0
SUBST_RAW = 1
SUBST_SIG = 2
_rm = re.compile(r'\$[()]')
_remove = re.compile(r'\$\([^\$]*(\$[^\)][^\$]*)*\$\)')
# Indexed by the SUBST_* constants above.
_regex_remove = [ _rm, None, _remove ]
def _rm_list(list):
#return [ l for l in list if not l in ('$(', '$)') ]
return filter(lambda l: not l in ('$(', '$)'), list)
def _remove_list(list):
result = []
do_append = result.append
for l in list:
if l == '$(':
do_append = lambda x: None
elif l == '$)':
do_append = result.append
else:
do_append(l)
return result
# Indexed by the SUBST_* constants above.
_list_remove = [ _rm_list, None, _remove_list ]
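# Illustrative sketch (not part of the original module): _rm_list() only drops
# the $( / $) markers, while _remove_list() also drops everything between
# them, which is what keeps $(-bracketed flags out of command signatures.
def _example_strip_helpers():
    words = ['gcc', '$(', '-I/tmp/include', '$)', 'foo.c']
    assert _rm_list(words) == ['gcc', '-I/tmp/include', 'foo.c']
    assert _remove_list(words) == ['gcc', 'foo.c']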
# Regular expressions for splitting strings and handling substitutions,
# for use by the scons_subst() and scons_subst_list() functions:
#
# The first expression compiled matches all of the $-introduced tokens
# that we need to process in some way, and is used for substitutions.
# The expressions it matches are:
#
# "$$"
# "$("
# "$)"
# "$variable" [must begin with alphabetic or underscore]
# "${any stuff}"
#
# The second expression compiled is used for splitting strings into tokens
# to be processed, and it matches all of the tokens listed above, plus
# the following that affect how arguments do or don't get joined together:
#
# " " [white space]
# "non-white-space" [without any dollar signs]
# "$" [single dollar sign]
#
_dollar_exps_str = r'\$[\$\(\)]|\$[_a-zA-Z][\.\w]*|\${[^}]*}'
_dollar_exps = re.compile(r'(%s)' % _dollar_exps_str)
_separate_args = re.compile(r'(%s|\s+|[^\s\$]+|\$)' % _dollar_exps_str)
# This regular expression is used to replace strings of multiple white
# space characters in the string result from the scons_subst() function.
_space_sep = re.compile(r'[\t ]+(?![^{]*})')
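# Illustrative sketch (not part of the original module) of what the two
# expressions above match: _dollar_exps picks out just the $-substitutions,
# while _separate_args also yields the whitespace and plain-word tokens that
# scons_subst_list() walks over.
def _example_token_regexes():
    line = '$CC -o $TARGET ${SOURCES}'
    assert _dollar_exps.findall(line) == ['$CC', '$TARGET', '${SOURCES}']
    assert _separate_args.findall(line) == [
        '$CC', ' ', '-o', ' ', '$TARGET', ' ', '${SOURCES}']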
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
"""Expand a string or list containing construction variable
substitutions.
This is the work-horse function for substitutions in file names
and the like. The companion scons_subst_list() function (below)
handles separating command lines into lists of arguments, so see
that function if that's what you're looking for.
"""
if type(strSubst) == types.StringType and string.find(strSubst, '$') < 0:
return strSubst
class StringSubber:
"""A class to construct the results of a scons_subst() call.
This binds a specific construction environment, mode, target and
source with two methods (substitute() and expand()) that handle
the expansion.
"""
def __init__(self, env, mode, conv, gvars):
self.env = env
self.mode = mode
self.conv = conv
self.gvars = gvars
def expand(self, s, lvars):
"""Expand a single "token" as necessary, returning an
appropriate string containing the expansion.
This handles expanding different types of things (strings,
lists, callables) appropriately. It calls the wrapper
substitute() method to re-expand things as necessary, so that
the results of expansions of side-by-side strings still get
re-evaluated separately, not smushed together.
"""
if is_String(s):
try:
s0, s1 = s[:2]
except (IndexError, ValueError):
return s
if s0 != '$':
return s
if s1 == '$':
return '$'
elif s1 in '()':
return s
else:
key = s[1:]
if key[0] == '{' or string.find(key, '.') >= 0:
if key[0] == '{':
key = key[1:-1]
try:
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
except Exception, e:
if e.__class__ in AllowableExceptions:
return ''
raise_exception(e, lvars['TARGETS'], s)
else:
if lvars.has_key(key):
s = lvars[key]
elif self.gvars.has_key(key):
s = self.gvars[key]
elif not NameError in AllowableExceptions:
raise_exception(NameError(key), lvars['TARGETS'], s)
else:
return ''
# Before re-expanding the result, handle
# recursive expansion by copying the local
# variable dictionary and overwriting a null
# string for the value of the variable name
# we just expanded.
#
# This could potentially be optimized by only
# copying lvars when s contains more expansions,
# but lvars is usually supposed to be pretty
# small, and deeply nested variable expansions
# are probably more the exception than the norm,
# so it should be tolerable for now.
lv = lvars.copy()
var = string.split(key, '.')[0]
lv[var] = ''
return self.substitute(s, lv)
elif is_Sequence(s):
def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
return conv(substitute(l, lvars))
return map(func, s)
elif callable(s):
try:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
env=self.env,
for_signature=(self.mode != SUBST_CMD))
except TypeError:
# This probably indicates that it's a callable
# object that doesn't match our calling arguments
# (like an Action).
if self.mode == SUBST_RAW:
return s
s = self.conv(s)
return self.substitute(s, lvars)
elif s is None:
return ''
else:
return s
def substitute(self, args, lvars):
"""Substitute expansions in an argument or list of arguments.
This serves as a wrapper for splitting up a string into
separate tokens.
"""
if is_String(args) and not isinstance(args, CmdStringHolder):
args = str(args) # In case it's a UserString.
try:
def sub_match(match, conv=self.conv, expand=self.expand, lvars=lvars):
return conv(expand(match.group(1), lvars))
result = _dollar_exps.sub(sub_match, args)
except TypeError:
# If the internal conversion routine doesn't return
# strings (it could be overridden to return Nodes, for
# example), then the 1.5.2 re module will throw this
# exception. Back off to a slower, general-purpose
# algorithm that works for all data types.
args = _separate_args.findall(args)
result = []
for a in args:
result.append(self.conv(self.expand(a, lvars)))
if len(result) == 1:
result = result[0]
else:
result = string.join(map(str, result), '')
return result
else:
return self.expand(args, lvars)
if conv is None:
conv = _strconv[mode]
# Doing this every time is a bit of a waste, since the Executor
# has typically already populated the OverrideEnvironment with
# $TARGET/$SOURCE variables. We're keeping this (for now), though,
# because it supports existing behavior that allows us to call
# an Action directly with an arbitrary target+source pair, which
# we use in Tool/tex.py to handle calling $BIBTEX when necessary.
# If we dropped that behavior (or found another way to cover it),
# we could get rid of this call completely and just rely on the
# Executor setting the variables.
if not lvars.has_key('TARGET'):
d = subst_dict(target, source)
if d:
lvars = lvars.copy()
lvars.update(d)
# We're (most likely) going to eval() things. If Python doesn't
# find a __builtins__ value in the global dictionary used for eval(),
# it copies the current global values for you. Avoid this by
# setting it explicitly and then deleting, so we don't pollute the
# construction environment Dictionary(ies) that are typically used
# for expansion.
gvars['__builtins__'] = __builtins__
ss = StringSubber(env, mode, conv, gvars)
result = ss.substitute(strSubst, lvars)
try:
del gvars['__builtins__']
except KeyError:
pass
if is_String(result):
# Remove $(-$) pairs and any stuff in between,
# if that's appropriate.
remove = _regex_remove[mode]
if remove:
result = remove.sub('', result)
if mode != SUBST_RAW:
# Compress strings of white space characters into
# a single space.
result = string.strip(_space_sep.sub(' ', result))
elif is_Sequence(result):
remove = _list_remove[mode]
if remove:
result = remove(result)
return result
#Subst_List_Strings = {}
def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
"""Substitute construction variables in a string (or list or other
object) and separate the arguments into a command list.
The companion scons_subst() function (above) handles basic
substitutions within strings, so see that function instead
if that's what you're looking for.
"""
# try:
# Subst_List_Strings[strSubst] = Subst_List_Strings[strSubst] + 1
# except KeyError:
# Subst_List_Strings[strSubst] = 1
# import SCons.Debug
# SCons.Debug.caller_trace(1)
class ListSubber(UserList.UserList):
"""A class to construct the results of a scons_subst_list() call.
Like StringSubber, this class binds a specific construction
environment, mode, target and source with two methods
(substitute() and expand()) that handle the expansion.
In addition, however, this class is used to track the state of
the result(s) we're gathering so we can do the appropriate thing
whenever we have to append another word to the result--start a new
line, start a new word, append to the current word, etc. We do
this by setting the "append" attribute to the right method so
that our wrapper methods only need ever call ListSubber.append(),
and the rest of the object takes care of doing the right thing
internally.
"""
def __init__(self, env, mode, conv, gvars):
UserList.UserList.__init__(self, [])
self.env = env
self.mode = mode
self.conv = conv
self.gvars = gvars
if self.mode == SUBST_RAW:
self.add_strip = lambda x, s=self: s.append(x)
else:
self.add_strip = lambda x, s=self: None
self.in_strip = None
self.next_line()
def expand(self, s, lvars, within_list):
"""Expand a single "token" as necessary, appending the
expansion to the current result.
This handles expanding different types of things (strings,
lists, callables) appropriately. It calls the wrapper
substitute() method to re-expand things as necessary, so that
the results of expansions of side-by-side strings still get
re-evaluated separately, not smushed together.
"""
if is_String(s):
try:
s0, s1 = s[:2]
except (IndexError, ValueError):
self.append(s)
return
if s0 != '$':
self.append(s)
return
if s1 == '$':
self.append('$')
elif s1 == '(':
self.open_strip('$(')
elif s1 == ')':
self.close_strip('$)')
else:
key = s[1:]
if key[0] == '{' or string.find(key, '.') >= 0:
if key[0] == '{':
key = key[1:-1]
try:
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
except Exception, e:
if e.__class__ in AllowableExceptions:
return
raise_exception(e, lvars['TARGETS'], s)
else:
if lvars.has_key(key):
s = lvars[key]
elif self.gvars.has_key(key):
s = self.gvars[key]
elif not NameError in AllowableExceptions:
raise_exception(NameError(), lvars['TARGETS'], s)
else:
return
# Before re-expanding the result, handle
# recursive expansion by copying the local
# variable dictionary and overwriting a null
# string for the value of the variable name
# we just expanded.
lv = lvars.copy()
var = string.split(key, '.')[0]
lv[var] = ''
self.substitute(s, lv, 0)
self.this_word()
elif is_Sequence(s):
for a in s:
self.substitute(a, lvars, 1)
self.next_word()
elif callable(s):
try:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
env=self.env,
for_signature=(self.mode != SUBST_CMD))
except TypeError:
# This probably indicates that it's a callable
# object that doesn't match our calling arguments
# (like an Action).
if self.mode == SUBST_RAW:
self.append(s)
return
s = self.conv(s)
self.substitute(s, lvars, within_list)
elif s is None:
self.this_word()
else:
self.append(s)
def substitute(self, args, lvars, within_list):
"""Substitute expansions in an argument or list of arguments.
This serves as a wrapper for splitting up a string into
separate tokens.
"""
if is_String(args) and not isinstance(args, CmdStringHolder):
args = str(args) # In case it's a UserString.
args = _separate_args.findall(args)
for a in args:
if a[0] in ' \t\n\r\f\v':
if '\n' in a:
self.next_line()
elif within_list:
self.append(a)
else:
self.next_word()
else:
self.expand(a, lvars, within_list)
else:
self.expand(args, lvars, within_list)
def next_line(self):
"""Arrange for the next word to start a new line. This
is like starting a new word, except that we have to append
another line to the result."""
UserList.UserList.append(self, [])
self.next_word()
def this_word(self):
"""Arrange for the next word to append to the end of the
current last word in the result."""
self.append = self.add_to_current_word
def next_word(self):
"""Arrange for the next word to start a new word."""
self.append = self.add_new_word
def add_to_current_word(self, x):
"""Append the string x to the end of the current last word
in the result. If that is not possible, then just add
it as a new word. Make sure the entire concatenated string
inherits the object attributes of x (in particular, the
escape function) by wrapping it as CmdStringHolder."""
if not self.in_strip or self.mode != SUBST_SIG:
try:
current_word = self[-1][-1]
except IndexError:
self.add_new_word(x)
else:
# All right, this is a hack and it should probably
# be refactored out of existence in the future.
# The issue is that we want to smoosh words together
# and make one file name that gets escaped if
# we're expanding something like foo$EXTENSION,
# but we don't want to smoosh them together if
# it's something like >$TARGET, because then we'll
# treat the '>' like it's part of the file name.
# So for now, just hard-code looking for the special
# command-line redirection characters...
try:
last_char = str(current_word)[-1]
except IndexError:
last_char = '\0'
if last_char in '<>|':
self.add_new_word(x)
else:
y = current_word + x
# We used to treat a word appended to a literal
# as a literal itself, but this caused problems
# with interpreting quotes around space-separated
# targets on command lines. Removing this makes
# none of the "substantive" end-to-end tests fail,
# so we'll take this out but leave it commented
# for now in case there's a problem not covered
# by the test cases and we need to resurrect this.
#literal1 = self.literal(self[-1][-1])
#literal2 = self.literal(x)
y = self.conv(y)
if is_String(y):
#y = CmdStringHolder(y, literal1 or literal2)
y = CmdStringHolder(y, None)
self[-1][-1] = y
def add_new_word(self, x):
if not self.in_strip or self.mode != SUBST_SIG:
literal = self.literal(x)
x = self.conv(x)
if is_String(x):
x = CmdStringHolder(x, literal)
self[-1].append(x)
self.append = self.add_to_current_word
def literal(self, x):
try:
l = x.is_literal
except AttributeError:
return None
else:
return l()
def open_strip(self, x):
"""Handle the "open strip" $( token."""
self.add_strip(x)
self.in_strip = 1
def close_strip(self, x):
"""Handle the "close strip" $) token."""
self.add_strip(x)
self.in_strip = None
if conv is None:
conv = _strconv[mode]
# Doing this every time is a bit of a waste, since the Executor
# has typically already populated the OverrideEnvironment with
# $TARGET/$SOURCE variables. We're keeping this (for now), though,
# because it supports existing behavior that allows us to call
# an Action directly with an arbitrary target+source pair, which
# we use in Tool/tex.py to handle calling $BIBTEX when necessary.
# If we dropped that behavior (or found another way to cover it),
# we could get rid of this call completely and just rely on the
# Executor setting the variables.
if not lvars.has_key('TARGET'):
d = subst_dict(target, source)
if d:
lvars = lvars.copy()
lvars.update(d)
# We're (most likely) going to eval() things. If Python doesn't
# find a __builtins__ value in the global dictionary used for eval(),
# it copies the current global values for you. Avoid this by
# setting it explicitly and then deleting, so we don't pollute the
# construction environment Dictionary(ies) that are typically used
# for expansion.
gvars['__builtins__'] = __builtins__
ls = ListSubber(env, mode, conv, gvars)
ls.substitute(strSubst, lvars, 0)
try:
del gvars['__builtins__']
except KeyError:
pass
return ls.data
def scons_subst_once(strSubst, env, key):
"""Perform single (non-recursive) substitution of a single
construction variable keyword.
This is used when setting a variable when copying or overriding values
in an Environment. We want to capture (expand) the old value before
we override it, so people can do things like:
env2 = env.Clone(CCFLAGS = '$CCFLAGS -g')
We do this with some straightforward, brute-force code here...
"""
if type(strSubst) == types.StringType and string.find(strSubst, '$') < 0:
return strSubst
matchlist = ['$' + key, '${' + key + '}']
val = env.get(key, '')
def sub_match(match, val=val, matchlist=matchlist):
a = match.group(1)
if a in matchlist:
a = val
if is_Sequence(a):
return string.join(map(str, a))
else:
return str(a)
if is_Sequence(strSubst):
result = []
for arg in strSubst:
if is_String(arg):
if arg in matchlist:
arg = val
if is_Sequence(arg):
result.extend(arg)
else:
result.append(arg)
else:
result.append(_dollar_exps.sub(sub_match, arg))
else:
result.append(arg)
return result
elif is_String(strSubst):
return _dollar_exps.sub(sub_match, strSubst)
else:
return strSubst
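# Illustrative sketch (not part of the original module): a plain dict stands
# in for a construction environment, since scons_subst_once() only calls
# env.get(); the old value of the keyword is expanded exactly once.
def _example_scons_subst_once():
    env = {'CCFLAGS': '-O2'}
    assert scons_subst_once('$CCFLAGS -g', env, 'CCFLAGS') == '-O2 -g'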
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import cgi
import re
__all__ = ['html_annotate', 'htmldiff']
try:
_unicode = unicode
except NameError:
# Python 3
_unicode = str
try:
basestring = __builtins__["basestring"]
except (KeyError, NameError):
# Python 3
basestring = str
############################################################
## Annotation
############################################################
def default_markup(text, version):
return '<span title="%s">%s</span>' % (
cgi.escape(_unicode(version), 1), text)
def html_annotate(doclist, markup=default_markup):
"""
doclist should be ordered from oldest to newest, like::
>>> version1 = 'Hello World'
>>> version2 = 'Goodbye World'
>>> print(html_annotate([(version1, 'version 1'),
... (version2, 'version 2')]))
<span title="version 2">Goodbye</span> <span title="version 1">World</span>
The documents must be *fragments* (str/UTF8 or unicode), not
complete documents
The markup argument is a function to markup the spans of words.
This function is called like markup('Hello', 'version 2'), and
returns HTML. The first argument is text and never includes any
markup. The default uses a span with a title:
>>> print(default_markup('Some Text', 'by Joe'))
<span title="by Joe">Some Text</span>
"""
# The basic strategy we have is to split the documents up into
# logical tokens (which are words with attached markup). We then
# do diffs of each of the versions to track when a token first
# appeared in the document; the annotation attached to the token
# is the version where it first appeared.
tokenlist = [tokenize_annotated(doc, version)
for doc, version in doclist]
cur_tokens = tokenlist[0]
for tokens in tokenlist[1:]:
html_annotate_merge_annotations(cur_tokens, tokens)
cur_tokens = tokens
# After we've tracked all the tokens, we can combine spans of text
# that are adjacent and have the same annotation
cur_tokens = compress_tokens(cur_tokens)
# And finally add markup
result = markup_serialize_tokens(cur_tokens, markup)
return ''.join(result).strip()
def tokenize_annotated(doc, annotation):
"""Tokenize a document and add an annotation attribute to each token
"""
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens
def html_annotate_merge_annotations(tokens_old, tokens_new):
"""Merge the annotations from tokens_old into tokens_new, when the
tokens in the new document already existed in the old document.
"""
s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
commands = s.get_opcodes()
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
eq_old = tokens_old[i1:i2]
eq_new = tokens_new[j1:j2]
copy_annotations(eq_old, eq_new)
def copy_annotations(src, dest):
"""
Copy annotations from the tokens listed in src to the tokens in dest
"""
assert len(src) == len(dest)
for src_tok, dest_tok in zip(src, dest):
dest_tok.annotation = src_tok.annotation
def compress_tokens(tokens):
"""
Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation
"""
result = [tokens[0]]
for tok in tokens[1:]:
if (not result[-1].post_tags and
not tok.pre_tags and
result[-1].annotation == tok.annotation):
compress_merge_back(result, tok)
else:
result.append(tok)
return result
def compress_merge_back(tokens, tok):
""" Merge tok into the last element of tokens (modifying the list of
tokens in-place). """
last = tokens[-1]
if type(last) is not token or type(tok) is not token:
tokens.append(tok)
else:
text = _unicode(last)
if last.trailing_whitespace:
text += ' '
text += tok
merged = token(text,
pre_tags=last.pre_tags,
post_tags=tok.post_tags,
trailing_whitespace=tok.trailing_whitespace)
merged.annotation = last.annotation
tokens[-1] = merged
def markup_serialize_tokens(tokens, markup_func):
"""
Serialize the list of tokens into a list of text chunks, calling
markup_func around text to add annotations.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
html = token.html()
html = markup_func(html, token.annotation)
if token.trailing_whitespace:
html += ' '
yield html
for post in token.post_tags:
yield post
############################################################
## HTML Diffs
############################################################
def htmldiff(old_html, new_html):
## FIXME: this should take parsed documents too, and use their body
## or other content.
""" Do a diff of the old and new document. The documents are HTML
*fragments* (str/UTF8 or unicode), they are not complete documents
(i.e., no <html> tag).
Returns HTML with <ins> and <del> tags added around the
appropriate text.
Markup is generally ignored, with the markup from new_html
preserved, and possibly some markup from old_html (though it is
considered acceptable to lose some of the old markup). Only the
words in the HTML are diffed. The exception is <img> tags, which
are treated like words, and the href attribute of <a> tags, which
are noted inside the tag itself when there are changes.
"""
old_html_tokens = tokenize(old_html)
new_html_tokens = tokenize(new_html)
result = htmldiff_tokens(old_html_tokens, new_html_tokens)
result = ''.join(result).strip()
return fixup_ins_del_tags(result)
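# Illustrative sketch (not part of the original module): both arguments are
# HTML *fragments*.  The exact whitespace of the output depends on the
# tokenizer, so this only checks that the changed word ends up wrapped in
# <ins> and the removed word in <del>.
def _example_htmldiff():
    result = htmldiff('<p>Hello World</p>', '<p>Goodbye World</p>')
    assert '<ins>Goodbye</ins>' in result
    assert '<del>Hello</del>' in result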
def htmldiff_tokens(html1_tokens, html2_tokens):
""" Does a diff on the tokens themselves, returning a list of text
chunks (not tokens).
"""
# There are several passes as we do the differences. The tokens
# isolate the portion of the content we care to diff; difflib does
# all the actual hard work at that point.
#
# Then we must create a valid document from pieces of both the old
# document and the new document. We generally prefer to take
# markup from the new document, and only do a best effort attempt
# to keep markup from the old document; anything that we can't
# resolve we throw away. Also we try to put the deletes as close
# to the location where we think they would have been -- because
# we are only keeping the markup from the new document, it can be
# fuzzy where in the new document the old text would have gone.
# Again we just do a best effort attempt.
s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
commands = s.get_opcodes()
result = []
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
continue
if command == 'insert' or command == 'replace':
ins_tokens = expand_tokens(html2_tokens[j1:j2])
merge_insert(ins_tokens, result)
if command == 'delete' or command == 'replace':
del_tokens = expand_tokens(html1_tokens[i1:i2])
merge_delete(del_tokens, result)
# If deletes were inserted directly as <del> then we'd have an
# invalid document at this point. Instead we put in special
# markers, and when the complete diffed document has been created
# we try to move the deletes around and resolve any problems.
result = cleanup_delete(result)
return result
def expand_tokens(tokens, equal=False):
"""Given a list of tokens, return a generator of the chunks of
text for the data in the tokens.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
if not equal or not token.hide_when_equal:
if token.trailing_whitespace:
yield token.html() + ' '
else:
yield token.html()
for post in token.post_tags:
yield post
def merge_insert(ins_chunks, doc):
""" doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that. """
# Though we don't throw away unbalanced_start or unbalanced_end
# (we assume there is accompanying markup later or earlier in the
# document), we only put <ins> around the balanced portion.
unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
doc.extend(unbalanced_start)
if doc and not doc[-1].endswith(' '):
# Fix up the case where the word before the insert didn't end with
# a space
doc[-1] += ' '
doc.append('<ins>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </ins>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</ins> ')
doc.extend(unbalanced_end)
# These are sentinels to represent the start and end of a <del>
# segment, until we do the cleanup phase to turn them into proper
# markup:
class DEL_START:
pass
class DEL_END:
pass
class NoDeletes(Exception):
""" Raised when the document no longer contains any pending deletes
(DEL_START/DEL_END) """
def merge_delete(del_chunks, doc):
""" Adds the text chunks in del_chunks to the document doc (another
list of text chunks) with marker to show it is a delete.
cleanup_delete later resolves these markers into <del> tags."""
doc.append(DEL_START)
doc.extend(del_chunks)
doc.append(DEL_END)
def cleanup_delete(chunks):
""" Cleans up any DEL_START/DEL_END markers in the document, replacing
them with <del></del>. To do this while keeping the document
valid, it may need to drop some tags (either start or end tags).
It may also move the del into adjacent tags to try to move it to a
similar location where it was originally located (e.g., moving a
delete into preceding <div> tag, if the del looks like (DEL_START,
'Text</div>', DEL_END)"""
while 1:
# Find a pending DEL_START/DEL_END, splitting the document
# into stuff-preceding-DEL_START, stuff-inside, and
# stuff-following-DEL_END
try:
pre_delete, delete, post_delete = split_delete(chunks)
except NoDeletes:
# Nothing found, we've cleaned up the entire doc
break
# The stuff-inside-DEL_START/END may not be well balanced
# markup. First we figure out what unbalanced portions there are:
unbalanced_start, balanced, unbalanced_end = split_unbalanced(delete)
# Then we move the span forward and/or backward based on these
# unbalanced portions:
locate_unbalanced_start(unbalanced_start, pre_delete, post_delete)
locate_unbalanced_end(unbalanced_end, pre_delete, post_delete)
doc = pre_delete
if doc and not doc[-1].endswith(' '):
# Fix up case where the word before us didn't have a trailing space
doc[-1] += ' '
doc.append('<del>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </del>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</del> ')
doc.extend(post_delete)
chunks = doc
return chunks
def split_unbalanced(chunks):
"""Return (unbalanced_start, balanced, unbalanced_end), where each is
a list of text and tag chunks.
unbalanced_start is a list of all the tags that are opened, but
not closed in this span. Similarly, unbalanced_end is a list of
tags that are closed but were not opened. Extracting these might
mean some reordering of the chunks."""
start = []
end = []
tag_stack = []
balanced = []
for chunk in chunks:
if not chunk.startswith('<'):
balanced.append(chunk)
continue
endtag = chunk[1] == '/'
name = chunk.split()[0].strip('<>/')
if name in empty_tags:
balanced.append(chunk)
continue
if endtag:
if tag_stack and tag_stack[-1][0] == name:
balanced.append(chunk)
name, pos, tag = tag_stack.pop()
balanced[pos] = tag
elif tag_stack:
start.extend([tag for name, pos, tag in tag_stack])
tag_stack = []
end.append(chunk)
else:
end.append(chunk)
else:
tag_stack.append((name, len(balanced), chunk))
balanced.append(None)
start.extend(
[chunk for name, pos, chunk in tag_stack])
balanced = [chunk for chunk in balanced if chunk is not None]
return start, balanced, end
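# Illustrative sketch (not part of the original module): the stray </div> in
# this span is closed but never opened, so it comes back as unbalanced_end,
# while the matched <p>...</p> pair stays in the balanced chunk list.
def _example_split_unbalanced():
    start, balanced, end = split_unbalanced(['<p>', 'Hi', '</p>', '</div>'])
    assert start == []
    assert balanced == ['<p>', 'Hi', '</p>']
    assert end == ['</div>']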
def split_delete(chunks):
""" Returns (stuff_before_DEL_START, stuff_inside_DEL_START_END,
stuff_after_DEL_END). Returns the first case found (there may be
more DEL_STARTs in stuff_after_DEL_END). Raises NoDeletes if
there's no DEL_START found. """
try:
pos = chunks.index(DEL_START)
except ValueError:
raise NoDeletes
pos2 = chunks.index(DEL_END)
return chunks[:pos], chunks[pos+1:pos2], chunks[pos2+1:]
def locate_unbalanced_start(unbalanced_start, pre_delete, post_delete):
""" pre_delete and post_delete implicitly point to a place in the
document (where the two were split). This moves that point (by
popping items from one and pushing them onto the other). It moves
the point to try to find a place where unbalanced_start applies.
As an example::
>>> unbalanced_start = ['<div>']
>>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
>>> pre, post = doc[:3], doc[3:]
>>> pre, post
(['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
>>> locate_unbalanced_start(unbalanced_start, pre, post)
>>> pre, post
(['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])
As you can see, we moved the point so that the dangling <div> that
we found will be effectively replaced by the div in the original
document. If this doesn't work out, we just throw away
unbalanced_start without doing anything.
"""
while 1:
if not unbalanced_start:
            # We have totally succeeded in finding the position
break
finding = unbalanced_start[0]
finding_name = finding.split()[0].strip('<>')
if not post_delete:
break
next = post_delete[0]
if next is DEL_START or not next.startswith('<'):
# Reached a word, we can't move the delete text forward
break
if next[1] == '/':
# Reached a closing tag, can we go further? Maybe not...
break
name = next.split()[0].strip('<>')
if name == 'ins':
# Can't move into an insert
break
assert name != 'del', (
"Unexpected delete tag: %r" % next)
if name == finding_name:
unbalanced_start.pop(0)
pre_delete.append(post_delete.pop(0))
else:
# Found a tag that doesn't match
break
def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
""" like locate_unbalanced_start, except handling end tags and
possibly moving the point earlier in the document. """
while 1:
if not unbalanced_end:
# Success
break
finding = unbalanced_end[-1]
finding_name = finding.split()[0].strip('<>/')
if not pre_delete:
break
next = pre_delete[-1]
if next is DEL_END or not next.startswith('</'):
# A word or a start tag
break
name = next.split()[0].strip('<>/')
if name == 'ins' or name == 'del':
# Can't move into an insert or delete
break
if name == finding_name:
unbalanced_end.pop()
post_delete.insert(0, pre_delete.pop())
else:
# Found a tag that doesn't match
break
class token(_unicode):
""" Represents a diffable token, generally a word that is displayed to
the user. Opening tags are attached to this token when they are
adjacent (pre_tags) and closing tags that follow the word
(post_tags). Some exceptions occur when there are empty tags
adjacent to a word, so there may be close tags in pre_tags, or
open tags in post_tags.
We also keep track of whether the word was originally followed by
whitespace, even though we do not want to treat the word as
equivalent to a similar word that does not have a trailing
space."""
# When this is true, the token will be eliminated from the
# displayed diff if no change has occurred:
hide_when_equal = False
def __new__(cls, text, pre_tags=None, post_tags=None, trailing_whitespace=False):
obj = _unicode.__new__(cls, text)
if pre_tags is not None:
obj.pre_tags = pre_tags
else:
obj.pre_tags = []
if post_tags is not None:
obj.post_tags = post_tags
else:
obj.post_tags = []
obj.trailing_whitespace = trailing_whitespace
return obj
def __repr__(self):
return 'token(%s, %r, %r)' % (_unicode.__repr__(self), self.pre_tags, self.post_tags)
def html(self):
return _unicode(self)
class tag_token(token):
""" Represents a token that is actually a tag. Currently this is just
the <img> tag, which takes up visible space just like a word but
is only represented in a document by a tag. """
def __new__(cls, tag, data, html_repr, pre_tags=None,
post_tags=None, trailing_whitespace=False):
        obj = token.__new__(cls, "%s: %s" % (tag, data),
pre_tags=pre_tags,
post_tags=post_tags,
trailing_whitespace=trailing_whitespace)
obj.tag = tag
obj.data = data
obj.html_repr = html_repr
return obj
def __repr__(self):
return 'tag_token(%s, %s, html_repr=%s, post_tags=%r, pre_tags=%r, trailing_whitespace=%s)' % (
self.tag,
self.data,
self.html_repr,
self.pre_tags,
self.post_tags,
self.trailing_whitespace)
def html(self):
return self.html_repr
class href_token(token):
""" Represents the href in an anchor tag. Unlike other words, we only
show the href when it changes. """
hide_when_equal = True
def html(self):
return ' Link: %s' % self
def tokenize(html, include_hrefs=True):
"""
Parse the given HTML and returns token objects (words with attached tags).
This parses only the content of a page; anything in the head is
ignored, and the <head> and <body> elements are themselves
optional. The content is then parsed by lxml, which ensures the
validity of the resulting parsed document (though lxml may make
    incorrect guesses when the markup is particularly bad).
<ins> and <del> tags are also eliminated from the document, as
that gets confusing.
If include_hrefs is true, then the href attribute of <a> tags is
included as a special kind of diffable token."""
if etree.iselement(html):
body_el = html
else:
body_el = parse_html(html, cleanup=True)
# Then we split the document into text chunks for each tag, word, and end tag:
chunks = flatten_el(body_el, skip_tag=True, include_hrefs=include_hrefs)
# Finally re-joining them into token objects:
return fixup_chunks(chunks)
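# Minimal usage sketch (illustrative, exact escaping aside): tokenizing a small
# fragment yields word tokens whose pre_tags/post_tags carry the markup:
#
#   tokens = tokenize('<p>Hello <b>world</b></p>')
#   # roughly: [token('Hello', pre_tags=['<p>'], trailing_whitespace=True),
#   #           token('world', pre_tags=['<b>'], post_tags=['</b>', '</p>'])]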
def parse_html(html, cleanup=True):
"""
Parses an HTML fragment, returning an lxml element. Note that the HTML will be
wrapped in a <div> tag that was not in the original document.
If cleanup is true, make sure there's no <head> or <body>, and get
rid of any <ins> and <del> tags.
"""
if cleanup:
# This removes any extra markup or structure like <head>:
html = cleanup_html(html)
return fragment_fromstring(html, create_parent=True)
_body_re = re.compile(r'<body.*?>', re.I|re.S)
_end_body_re = re.compile(r'</body.*?>', re.I|re.S)
_ins_del_re = re.compile(r'</?(ins|del).*?>', re.I|re.S)
def cleanup_html(html):
""" This 'cleans' the HTML, meaning that any page structure is removed
(only the contents of <body> are used, if there is a <body>).
Also <ins> and <del> tags are removed. """
match = _body_re.search(html)
if match:
html = html[match.end():]
match = _end_body_re.search(html)
if match:
html = html[:match.start()]
html = _ins_del_re.sub('', html)
return html
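# For example (illustrative only):
#   cleanup_html('<html><body><p>Hi <ins>there</ins></p></body></html>')
# keeps only the contents of <body> and strips the <ins>/<del> markers,
# returning '<p>Hi there</p>'.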
end_whitespace_re = re.compile(r'[ \t\n\r]$')
def fixup_chunks(chunks):
"""
This function takes a list of chunks and produces a list of tokens.
"""
tag_accum = []
cur_word = None
result = []
for chunk in chunks:
if isinstance(chunk, tuple):
if chunk[0] == 'img':
src = chunk[1]
tag = chunk[2]
if tag.endswith(' '):
tag = tag[:-1]
trailing_whitespace = True
else:
trailing_whitespace = False
cur_word = tag_token('img', src, html_repr=tag,
pre_tags=tag_accum,
trailing_whitespace=trailing_whitespace)
tag_accum = []
result.append(cur_word)
elif chunk[0] == 'href':
href = chunk[1]
cur_word = href_token(href, pre_tags=tag_accum, trailing_whitespace=True)
tag_accum = []
result.append(cur_word)
continue
if is_word(chunk):
if chunk.endswith(' '):
chunk = chunk[:-1]
trailing_whitespace = True
else:
trailing_whitespace = False
cur_word = token(chunk, pre_tags=tag_accum, trailing_whitespace=trailing_whitespace)
tag_accum = []
result.append(cur_word)
elif is_start_tag(chunk):
tag_accum.append(chunk)
elif is_end_tag(chunk):
if tag_accum:
tag_accum.append(chunk)
else:
assert cur_word, (
"Weird state, cur_word=%r, result=%r, chunks=%r of %r"
% (cur_word, result, chunk, chunks))
cur_word.post_tags.append(chunk)
else:
assert(0)
if not result:
return [token('', pre_tags=tag_accum)]
else:
result[-1].post_tags.extend(tag_accum)
return result
# All the tags in HTML that don't require end tags:
empty_tags = (
'param', 'img', 'area', 'br', 'basefont', 'input',
'base', 'meta', 'link', 'col')
block_level_tags = (
'address',
'blockquote',
'center',
'dir',
'div',
'dl',
'fieldset',
'form',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'hr',
'isindex',
'menu',
'noframes',
'noscript',
'ol',
'p',
'pre',
'table',
'ul',
)
block_level_container_tags = (
'dd',
'dt',
'frameset',
'li',
'tbody',
'td',
'tfoot',
'th',
'thead',
'tr',
)
def flatten_el(el, include_hrefs, skip_tag=False):
""" Takes an lxml element el, and generates all the text chunks for
that tag. Each start tag is a chunk, each word is a chunk, and each
end tag is a chunk.
If skip_tag is true, then the outermost container tag is
not returned (just its contents)."""
if not skip_tag:
if el.tag == 'img':
yield ('img', el.get('src'), start_tag(el))
else:
yield start_tag(el)
if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
return
start_words = split_words(el.text)
for word in start_words:
yield cgi.escape(word)
for child in el:
for item in flatten_el(child, include_hrefs=include_hrefs):
yield item
if el.tag == 'a' and el.get('href') and include_hrefs:
yield ('href', el.get('href'))
if not skip_tag:
yield end_tag(el)
end_words = split_words(el.tail)
for word in end_words:
yield cgi.escape(word)
def split_words(text):
""" Splits some text into words. Includes trailing whitespace (one
space) on each word when appropriate. """
if not text or not text.strip():
return []
words = [w + ' ' for w in text.strip().split()]
if not end_whitespace_re.search(text):
words[-1] = words[-1][:-1]
return words
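# Illustrative behaviour, given the regexes above:
#   split_words('one two ')  ->  ['one ', 'two ']
#   split_words('one two')   ->  ['one ', 'two']
#   split_words('   ')       ->  []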
start_whitespace_re = re.compile(r'^[ \t\n\r]')
def start_tag(el):
"""
The text representation of the start tag for a tag.
"""
return '<%s%s>' % (
el.tag, ''.join([' %s="%s"' % (name, cgi.escape(value, True))
for name, value in el.attrib.items()]))
def end_tag(el):
""" The text representation of an end tag for a tag. Includes
trailing whitespace when appropriate. """
if el.tail and start_whitespace_re.search(el.tail):
extra = ' '
else:
extra = ''
return '</%s>%s' % (el.tag, extra)
def is_word(tok):
return not tok.startswith('<')
def is_end_tag(tok):
return tok.startswith('</')
def is_start_tag(tok):
return tok.startswith('<') and not tok.startswith('</')
def fixup_ins_del_tags(html):
""" Given an html string, move any <ins> or <del> tags inside of any
block-level elements, e.g. transform <ins><p>word</p></ins> to
<p><ins>word</ins></p> """
doc = parse_html(html, cleanup=False)
_fixup_ins_del_tags(doc)
html = serialize_html_fragment(doc, skip_outer=True)
return html
def serialize_html_fragment(el, skip_outer=False):
""" Serialize a single lxml element as HTML. The serialized form
includes the element's tail.
If skip_outer is true, then don't serialize the outermost tag
"""
assert not isinstance(el, basestring), (
"You should pass in an element, not a string like %r" % el)
html = etree.tostring(el, method="html", encoding=_unicode)
if skip_outer:
# Get rid of the extra starting tag:
html = html[html.find('>')+1:]
# Get rid of the extra end tag:
html = html[:html.rfind('<')]
return html.strip()
else:
return html
def _fixup_ins_del_tags(doc):
"""fixup_ins_del_tags that works on an lxml document in-place
"""
for tag in ['ins', 'del']:
for el in doc.xpath('descendant-or-self::%s' % tag):
if not _contains_block_level_tag(el):
continue
_move_el_inside_block(el, tag=tag)
el.drop_tag()
#_merge_element_contents(el)
def _contains_block_level_tag(el):
"""True if the element contains any block-level elements, like <p>, <td>, etc.
"""
if el.tag in block_level_tags or el.tag in block_level_container_tags:
return True
for child in el:
if _contains_block_level_tag(child):
return True
return False
def _move_el_inside_block(el, tag):
""" helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
and moves them inside any block-level tags. """
for child in el:
if _contains_block_level_tag(child):
break
else:
# No block-level tags in any child
children_tag = etree.Element(tag)
children_tag.text = el.text
el.text = None
children_tag.extend(list(el))
el[:] = [children_tag]
return
for child in list(el):
if _contains_block_level_tag(child):
_move_el_inside_block(child, tag)
if child.tail:
tail_tag = etree.Element(tag)
tail_tag.text = child.tail
child.tail = None
el.insert(el.index(child)+1, tail_tag)
else:
child_tag = etree.Element(tag)
el.replace(child, child_tag)
child_tag.append(child)
if el.text:
text_tag = etree.Element(tag)
text_tag.text = el.text
el.text = None
el.insert(0, text_tag)
def _merge_element_contents(el):
"""
Removes an element, but merges its contents into its place, e.g.,
given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
<p>Hi there!</p>
"""
parent = el.getparent()
text = el.text or ''
if el.tail:
if not len(el):
text += el.tail
else:
if el[-1].tail:
el[-1].tail += el.tail
else:
el[-1].tail = el.tail
index = parent.index(el)
if text:
if index == 0:
previous = None
else:
previous = parent[index-1]
if previous is None:
if parent.text:
parent.text += text
else:
parent.text = text
else:
if previous.tail:
previous.tail += text
else:
previous.tail = text
parent[index:index+1] = el.getchildren()
class InsensitiveSequenceMatcher(difflib.SequenceMatcher):
"""
Acts like SequenceMatcher, but tries not to find very small equal
blocks amidst large spans of changes
"""
threshold = 2
def get_matching_blocks(self):
size = min(len(self.a), len(self.b))
threshold = min(self.threshold, size / 4)
actual = difflib.SequenceMatcher.get_matching_blocks(self)
return [item for item in actual
if item[2] > threshold
or not item[2]]
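# Sketch of the intended effect (illustrative data, not from the library docs):
#
#   a = 'a1 a2 a3 a4 a5 a6 a7 a8 same b1 b2 b3 b4 b5 b6 b7 b8'.split()
#   b = 'c1 c2 c3 c4 c5 c6 c7 c8 same d1 d2 d3 d4 d5 d6 d7 d8'.split()
#   difflib.SequenceMatcher(None, a, b).get_matching_blocks()
#   # ...reports the isolated one-word 'same' match...
#   InsensitiveSequenceMatcher(None, a, b).get_matching_blocks()
#   # ...drops it, because its size of 1 is not greater than the threshold of 2,
#   # keeping only the terminating zero-length block.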
if __name__ == '__main__':
from lxml.html import _diffcommand
_diffcommand.main()
|
|
'''
Title: ParcelFabricInvestigator_addin
Author: Stephanie Wendel
Created: 1/13/2014
Update: 4/21/2014
Version: 1.4.1
Description: Provides a toolbar of buttons to use in ArcGIS Desktop that
investigate potential data issues with the Parcel Fabric.
'''
import arcpy
import pythonaddins
import os
# Global Variables
parcelFabric = None
parcelFabricLayer = None
SR = None
mxd = arcpy.mapping.MapDocument("Current")
df = arcpy.mapping.ListDataFrames(mxd)[0]
class Attributes(object):
"""Implementation for ParcelFabricInvestigator_addin.Attributes (Button)
This tool is not implemented."""
def __init__(self):
self.enabled = False
self.checked = False
def onClick(self):
pass
class DupPoints(object):
"""Implementation for ParcelFabricInvestigator_addin.DupPoints (Button)
Finds where there are duplicated points within the Points layer of the
fabric"""
def __init__(self):
self.enabled = False
self.checked = False
def onClick(self):
workspace, PFname = os.path.split(parcelFabric)
ParcelPoints = "FabricInvestigation\\Points"
DuplicatePointsTable = os.path.join("in_memory", PFname + "_IdenticalPoints")
arcpy.FindIdentical_management(ParcelPoints, DuplicatePointsTable, "X;Y",
"#","0","ONLY_DUPLICATES")
arcpy.AddJoin_management(ParcelPoints, "OBJECTID", DuplicatePointsTable,
"IN_FID", "KEEP_COMMON")
saveDuplicates = pythonaddins.SaveDialog("Save Duplicate Points")
arcpy.CopyFeatures_management(ParcelPoints, saveDuplicates)
newPath, newLayer = os.path.split(saveDuplicates)
arcpy.mapping.MoveLayer(df, arcpy.mapping.ListLayers(mxd,
"FabricInvestigation")[0],
arcpy.mapping.ListLayers(mxd, newLayer)[0],
"BEFORE")
DupPoints.checked = False
class Lines(object):
"""Implementation for ParcelFabricInvestigator_addin.Lines (Button)
This tool is not implemented."""
def __init__(self):
self.enabled = False
self.checked = False
def onClick(self):
pass
class Parcel(object):
"""Implementation for ParcelFabricInvestigator_addin.Parcel (Button)
This tool looks at the Check Fabric text file and finds problematic parcels.
It selects those parcels and allows the user to export them."""
def __init__(self):
self.enabled = False
self.checked = False
def onClick(self):
message1 = "Have you saved a text file report from the Check Fabric tool for the setup fabric?\n\n If yes, click OK and navigate to the location.\n\n If not, click Cancel. Please run the Check Fabric tool by right clicking on the parcel fabric. Next save the report as a text file."
box1 = pythonaddins.MessageBox(message1, "Find Check Fabric Text File", 1)
if box1 == 'OK':
textFile = pythonaddins.OpenDialog("Find Check Fabric Text File")
#print textFile
F1 = open(textFile, 'rb')
parcelId1 = []
for line in F1:
if "Parcel with ID =" in line:
pID = line[17:-28]
parcelId1.append(pID)
pfl = arcpy.mapping.ListLayers(mxd, "*Parcels")
for layer in pfl:
if layer.name == "Tax Parcels":
polygons = "FabricInvestigation\\Tax Parcels"
elif layer.name == "Parcels":
polygons = "FabricInvestigation\\Parcels"
else:
# Adding a message box here would cause the tool to fail because
# this loop also visits layers that are neither the Parcels nor
# the Tax Parcels layer.
pass
for ID in parcelId1:
if ID == parcelId1[0]:
where = '"OBJECTID" =' + str(ID)
else:
where = where + 'OR "OBJECTID" = ' + str(ID)
arcpy.SelectLayerByAttribute_management(polygons, "NEW_SELECTION",
where)
box3 = pythonaddins.MessageBox("Done selecting bad parcels. Click Yes if you would like to save as a feature class. Click No to return to map.",
"Finished Process", 4)
if box3 == "Yes":
newFC = pythonaddins.SaveDialog("Save Bad Parcels")
if newFC != "Cancel":
arcpy.CopyFeatures_management(polygons, newFC)
else:
pass
newPath, newLayer = os.path.split(newFC)
arcpy.mapping.MoveLayer(df, arcpy.mapping.ListLayers(mxd,
"FabricInvestigation")[0],
arcpy.mapping.ListLayers(mxd, newLayer)[0],
"BEFORE")
Parcel.checked = False
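# Illustrative note (not executed by the add-in): for parcel IDs ['12', '34']
# the loop above builds the selection expression
#     "OBJECTID" = 12 OR "OBJECTID" = 34
# which SelectLayerByAttribute_management applies as a NEW_SELECTION on the
# polygons layer.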
class ParcelFabricSetup(object):
"""Implementation for ParcelFabricInvestigator_addin.ParcelFabricSetup (Button)
This sets up a parcel fabric to be used by the tools and adds the layer to the
map."""
def __init__(self):
self.enabled = True
self.checked = True
def onClick(self):
global parcelFabric, parcelFabricLayer, SR
box = pythonaddins.MessageBox("Navigate to the Parcel Fabric you wish to investigate.",
"Select Parcel Fabric", 0)
parcelFabric = pythonaddins.OpenDialog("Find Parcel Fabric")
parcelFabricLayer = arcpy.MakeParcelFabricLayer_fabric(parcelFabric,
"FabricInvestigation")
SR = arcpy.Describe(parcelFabric).spatialReference
Parcel.enabled = True
Parcel.checked = True
DupPoints.enabled = True
DupPoints.checked = True
Point.enabled = True
Point.checked = True
ParcelFabricSetup.checked = False
ParcelLineGaps.enabled = True
ParcelLineGaps.checked = True
# To add
## pfl = arcpy.mapping.ListLayers(mxd, "*Parcels")
## for layer in pfl:
## if layer.name == "Tax Parcels":
## polygons = "FabricInvestigation\\Tax Parcels"
## elif layer.name == "Parcels":
## polygons = "FabricInvestigation\\Parcels"
## else:
## #Adding a message box here will cause tool to fail due to the looping of the layers and when if finds non Parcel layers.
## pass
class ParcelLineGaps(object):
"""Implementation for ParcelFabricInvestigator_addin.ParcelLineGaps (Button)
This tool shows where there are gaps in the fabric between the lines and the Parcels"""
def __init__(self):
self.enabled = False
self.checked = False
def onClick(self):
box = pythonaddins.MessageBox("You are about to look for gaps. This process could take a long time depending on the size of the fabric. Please be patient with the interface until it shows a finished message.", \
"Select Parcel Fabric", 1)
if box == "OK":
workspace, PFname = os.path.split(parcelFabric)
topology = os.path.join(workspace, PFname + "_topology")
if arcpy.Exists(topology):
arcpy.Delete_management(topology)
arcpy.CreateTopology_management(workspace, PFname + "_topology")
pfl = arcpy.mapping.ListLayers(mxd, "*Parcels")
for layer in pfl:
if layer.name == "Tax Parcels":
polygons = "FabricInvestigation\\Tax Parcels"
elif layer.name == "Parcels":
polygons = "FabricInvestigation\\Parcels"
else:
# Adding a message box here would cause the tool to fail because
# this loop also visits layers that are neither the Parcels nor
# the Tax Parcels layer.
pass
arcpy.AddFeatureClassToTopology_management(topology, polygons)
arcpy.AddFeatureClassToTopology_management(topology,
"FabricInvestigation\\Lines")
polygon_fc = os.path.join(workspace, PFname + "_Parcels")
line_fc = os.path.join(workspace, PFname + "_Lines")
arcpy.AddRuleToTopology_management(topology,
"Boundary Must Be Covered By (Area-Line)",
polygon_fc, "", line_fc)
arcpy.ValidateTopology_management(topology)
gdb, fds_name = os.path.split(workspace)
arcpy.ExportTopologyErrors_management(topology, gdb, "Gaps")
arcpy.mapping.MoveLayer(df, arcpy.mapping.ListLayers(mxd,
"FabricInvestigation")[0],
arcpy.mapping.ListLayers(mxd,
"Gaps_line")[0], "BEFORE")
arcpy.mapping.RemoveLayer(df, arcpy.mapping.ListLayers(mxd,
"Gaps_point")[0])
arcpy.mapping.RemoveLayer(df, arcpy.mapping.ListLayers(mxd,
"Gaps_poly")[0])
arcpy.mapping.RemoveLayer(df, arcpy.mapping.ListLayers(mxd,
PFname + "_topology")[0])
box2 = pythonaddins.MessageBox("Finished Processing Gaps. Please proceed.",
"Finsihed Processing Gaps", 0)
ParcelLineGaps.checked = False
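# Summary of the gap-finding workflow above, for reference: create a topology in
# the fabric's feature dataset, add the parcel polygons and lines to it, attach
# the "Boundary Must Be Covered By (Area-Line)" rule, validate, export the
# topology errors as "Gaps", and keep only the Gaps_line layer in the map.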
class Point(object):
"""Implementation for ParcelFabricInvestigator_addin.Point (Button)
This shows points that are not in their true locations in the display based
on their x and y values."""
def __init__(self):
self.enabled = False
self.checked = False
def onClick(self):
arcpy.MakeXYEventLayer_management("FabricInvestigation\\Points", "X", "Y",
"TruePointLocations", SR)
(arcpy.mapping.ListLayers(mxd, "TruePointLocations")[0]).visible = False
arcpy.SelectLayerByLocation_management("FabricInvestigation\\Points",
"ARE_IDENTICAL_TO",
"TruePointLocations",
selection_type="NEW_SELECTION")
arcpy.SelectLayerByLocation_management("FabricInvestigation\\Points",
selection_type="SWITCH_SELECTION")
box = pythonaddins.MessageBox("Done selecting points that are in the wrong location. You will not see any selected points if there is a visible scale range set and you are not in that range. Click Yes if you would like to save as a feature class. Click No to return to map.",
"Finished Process", 4)
if box == "Yes":
newFC = pythonaddins.SaveDialog("Save Bad Parcels")
if newFC != "Cancel":
arcpy.CopyFeatures_management("FabricInvestigation\\Points", newFC)
else:
pass
newPath, newLayer = os.path.split(newFC)
arcpy.mapping.MoveLayer(df, arcpy.mapping.ListLayers(mxd,
"FabricInvestigation")[0],
arcpy.mapping.ListLayers(mxd, newLayer)[0],
"BEFORE")
Point.checked = False
class Summary(object):
"""Implementation for ParcelFabricInvestigator_addin.Summary (Button)
This tool is not yet implemented"""
def __init__(self):
self.enabled = False
self.checked = False
def onClick(self):
pass
|
|
import urlparse
from django.views import generic as generic_views
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.utils.importlib import import_module
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from braces.views import LoginRequiredMixin
from pyconde.conference.models import current_conference, SessionKind
from . import forms
from . import models
from . import settings
class NextRedirectMixin(object):
"""
A simple mixin for checking for a next parameter for redirects.
"""
redirect_param = 'next'
def get_next_redirect(self):
next = self.request.GET.get(self.redirect_param)
if next is None:
return None
netloc = urlparse.urlparse(next)[1]
if netloc is None or netloc == "" or netloc == self.request.get_host():
return next
return None
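# Illustrative behaviour (assuming the request host is 'example.com'):
#   ?next=/speakers/             -> '/speakers/'   (relative path, accepted)
#   ?next=http://example.com/x/  -> accepted       (same host)
#   ?next=http://evil.com/       -> None           (foreign host, rejected)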
class TypedProposalFormMixin(object):
"""
This mixin overrides the original get_form_class method in order to replace
the default form with a type-specific form based on the type provided
as view argument or already associated with the object to be processed.
"""
def get_form_class(self):
if settings.UNIFIED_SUBMISSION_FORM:
return forms.ProposalSubmissionForm
type_slug = None
if getattr(self, 'object', None) and isinstance(self.object, models.Proposal):
type_slug = self.object.kind.slug
elif 'type' in self.kwargs:
type_slug = self.kwargs['type']
if type_slug:
self.proposal_kind = SessionKind.current_objects.get(slug=type_slug)
formcls_path = settings.TYPED_SUBMISSION_FORMS.get(type_slug)
if formcls_path:
mod_name, cls_name = formcls_path.rsplit('.', 1)
mod = import_module(mod_name)
form_cls = getattr(mod, cls_name)
if form_cls:
return form_cls
return forms.TypedSubmissionForm
def get_form(self, form_class):
form = super(TypedProposalFormMixin, self).get_form(form_class)
if hasattr(self, 'proposal_kind'):
setattr(form, 'kind_instance', self.proposal_kind)
return form
def get_template_names(self):
"""
Also look for a template with the name proposals/%(type)s_proposal_form.html
"""
proposed_names = super(TypedProposalFormMixin, self).get_template_names()
if hasattr(self, 'proposal_kind'):
base_name = proposed_names[0]
base_dir, name = base_name.rsplit('/', 1)
proposed_names.insert(0, "proposals/{0}_proposal_form.html".format(self.proposal_kind.slug))
return proposed_names
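# Illustrative sketch (the setting value below is hypothetical, not taken from
# this project): with
#   TYPED_SUBMISSION_FORMS = {'talk': 'myapp.forms.TalkSubmissionForm'}
# a request for type 'talk' resolves the dotted path via import_module and
# getattr and uses TalkSubmissionForm; types without an entry fall back to
# forms.TypedSubmissionForm, and the SessionKind instance is attached to the
# form as 'kind_instance'.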
class SubmitProposalView(TypedProposalFormMixin, NextRedirectMixin, generic_views.CreateView):
"""
Once registered a user can submit a proposal for the conference for a
specific kind. This is only possible while the selected SessionKind
accepts submissions.
"""
model = models.Proposal
# In this case we can't use LoginRequiredMixin since we override dispatch
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not len(SessionKind.current_objects.filter_open_kinds()):
return TemplateResponse(request=request, template='proposals/closed.html', context={})
if not settings.UNIFIED_SUBMISSION_FORM and 'type' not in kwargs:
# If unified submission is disabled and no type has been specified
# we have to show a dummy page telling the user which session kinds
# currently accept proposals.
session_kinds = []
open_session_kinds = []
for kind in SessionKind.current_objects.all():
if kind.accepts_proposals():
open_session_kinds.append(kind)
session_kinds.append(kind)
if not open_session_kinds:
return TemplateResponse(request=request, template='proposals/closed.html', context={})
return TemplateResponse(request=request, template='proposals/submission_intro.html', context={'session_kinds': session_kinds, 'open_kinds': open_session_kinds})
return super(SubmitProposalView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
next = self.get_next_redirect()
if next:
return next
return super(SubmitProposalView, self).get_success_url()
def form_valid(self, form):
obj = form.save(commit=False)
obj.speaker = self.request.user.speaker_profile
# TODO: Filter out duplications between speaker and additional speakers
obj.conference = current_conference()
obj.save()
self.object = obj
form.save_m2m()
return HttpResponseRedirect(self.get_success_url())
class SingleProposalView(generic_views.DetailView):
"""
Proposals can be viewed by everyone but provide some special links for
administrators and people participating in this proposal.
"""
model = models.Proposal
def get(self, request, *args, **kwargs):
self.object = self.get_object()
# note: self.object is a Proposal
# the user is allowed to see the Proposal if they are the author (speaker)
# OR if they are a reviewer - both cases are checked in can_participate_in_review()
from ..reviews.utils import can_participate_in_review
if not can_participate_in_review(request.user, self.object):
return TemplateResponse(request=request, template='proposals/denied.html', context={})
return super(SingleProposalView, self).get(request, *args, **kwargs)
def get_queryset(self):
if self.request.user.is_anonymous():
return self.model.objects.none()
return self.model.objects.filter(conference=current_conference())
def get_context_data(self, **kwargs):
data = super(SingleProposalView, self).get_context_data(**kwargs)
data['can_leave'] = self.request.user in [s.user for s in self.object.additional_speakers.all()]
data['can_edit'] = self.request.user == self.object.speaker.user
data['can_delete'] = self.request.user.is_staff or self.request.user == self.object.speaker.user
return data
class PermissionCheckedUpdateView(generic_views.UpdateView, NextRedirectMixin):
"""
Base update class that extends the UpdateView with an additional call
of check_permissions.
"""
def get_success_url(self):
next = self.get_next_redirect()
if next:
return next
return super(PermissionCheckedUpdateView, self).get_success_url()
def get_context_data(self, *args, **kwargs):
ctx = super(PermissionCheckedUpdateView, self).get_context_data(*args, **kwargs)
ctx.update({
'next': self.get_success_url()
})
return ctx
def get(self, request, *args, **kwargs):
self.object = self.get_object()
resp = self.check_permissions()
if resp is not None:
return resp
return super(PermissionCheckedUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
resp = self.check_permissions()
if resp is not None:
return resp
return super(PermissionCheckedUpdateView, self).post(request, *args, **kwargs)
class EditProposalView(LoginRequiredMixin, TypedProposalFormMixin, PermissionCheckedUpdateView):
"""
The primary speaker can edit a proposal as long as the SessionKind
still accepts proposals.
"""
model = models.Proposal
def form_valid(self, form):
self.object = form.save()
form.save_m2m()
messages.success(self.request, _("Proposal successfully changed"))
return HttpResponseRedirect(self.get_success_url())
def check_permissions(self):
"""
Only the primary speaker and staff members can edit a proposal.
"""
user = self.request.user
kind = self.object.kind
if not kind.accepts_proposals():
messages.error(self.request, _("You can no longer edit this proposal because the submission period has already ended."))
return HttpResponseRedirect(self.object.get_absolute_url())
if user != self.object.speaker.user and not user.is_staff:
messages.error(self.request, _("You have to be the primary speaker mentioned in the proposal in order to edit it."))
return HttpResponseRedirect(self.object.get_absolute_url())
return None
class AbstractProposalAction(generic_views.DetailView, NextRedirectMixin):
model = models.Proposal
def check_permissions(self):
pass
def get(self, request, *args, **kwargs):
self.object = self.get_object()
resp = self.check_permissions()
if resp is not None:
return resp
return super(AbstractProposalAction, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
resp = self.check_permissions()
if resp is not None:
return resp
resp = self.action()
if resp is not None:
return resp
return HttpResponseRedirect(self.get_post_action_url())
def get_context_data(self, *args, **kwargs):
ctx = super(AbstractProposalAction, self).get_context_data(*args, **kwargs)
ctx.update({
'next': self.get_post_action_url()
})
return ctx
def get_post_action_url(self):
next = self.get_next_redirect()
if next:
return next
return reverse('view_proposal', kwargs={'pk': self.object.pk})
class AbstractCancelProposalView(AbstractProposalAction):
"""
During the submission and review period a proposal can be cancelled
by the primary speaker. As soon as the review period is over
and the proposal got accepted, a cancellation has to be communicated
to the relevant staff member since this will involve rescheduling
of other sessions.
"""
template_name_suffix = '_cancel_confirm'
model = models.Proposal
def check_permissions(self):
user = self.request.user
kind = self.object.kind
if not kind.accepts_proposals():
messages.error(self.request, _("You can no longer cancel this proposal because the submission period has already ended."))
return HttpResponseRedirect(self.object.get_absolute_url())
if user != self.object.speaker.user:
messages.error(self.request, _("You have to be the primary speaker mentioned in the proposal in order to cancel it."))
return HttpResponseRedirect(self.object.get_absolute_url())
return None
def action(self):
self.object.delete()
messages.success(self.request, _("Proposal has been removed"))
return None
def get_post_action_url(self):
next = self.get_next_redirect()
if next:
return next
return reverse('my_proposals')
class CancelProposalView(LoginRequiredMixin, AbstractCancelProposalView):
pass
class LeaveProposalView(LoginRequiredMixin, AbstractCancelProposalView):
"""
A secondary speaker can decide not to actually take part in a session
and therefore leave a proposal. This is an option that is exclusive
to secondary speakers and is not available to the primary speaker.
"""
template_name_suffix = "_leave_confirm"
def check_permissions(self):
user = self.request.user
kind = self.object.kind
if not kind.accepts_proposals():
messages.error(self.request, _("You can no longer leave this proposal because the submission period has already ended."))
return HttpResponseRedirect(self.object.get_absolute_url())
if user not in [s.user for s in self.object.additional_speakers.all()]:
messages.error(self.request, _("Only secondary speakers can leave a proposal"))
return HttpResponseRedirect(self.object.get_absolute_url())
return None
def action(self):
self.object.additional_speakers.remove(self.request.user.speaker_profile)
messages.success(self.request, _("You were successfully removed as secondary speaker."))
class ListUserProposalsView(LoginRequiredMixin, generic_views.TemplateView):
"""
A speaker can see and manage a list of proposals submitted by her or that
include her as a secondary speaker.
"""
template_name = 'proposals/proposal_list_mine.html'
def get_context_data(self, **kwargs):
this_speaker = self.request.user.speaker_profile
ctx = super(ListUserProposalsView, self).get_context_data(**kwargs)
ctx.update({
'proposals': this_speaker.proposals
.filter(conference=current_conference()).all(),
'proposal_participations': this_speaker.proposal_participations
.filter(conference=current_conference()).all()
})
return ctx
class IndexView(generic_views.View):
def get(self, *args, **kwargs):
return HttpResponseRedirect(reverse('submit_proposal'))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import multiprocessing
import os
import re
import shutil
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class KerasCallbacksTest(test.TestCase):
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=1))
assert os.path.exists(filepath.format(epoch=3))
os.remove(filepath.format(epoch=1))
os.remove(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=2))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.test_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.test_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper], verbose=0)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0)
assert len(hist.epoch) >= patience
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert abs(float(keras.backend.get_value(model.optimizer.lr)) - 0.2
) < keras.backend.epsilon()
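# Worked check of the assertion above: LearningRateScheduler sets
# lr = 1. / (1. + epoch) at the start of each epoch, so after the last of the
# 5 epochs (epoch index 4) lr == 0.2, and the difference from 0.2 is expected
# to be within keras.backend.epsilon().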
def test_ReduceLROnPlateau(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
epsilon=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)),
0.01,
atol=1e-4)
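# Worked check of the assertion above: the initial SGD learning rate is 0.1 and
# factor=0.1, so a single plateau-triggered reduction gives 0.1 * 0.1 == 0.01,
# which is what assertAllClose verifies (the cooldown prevents further drops).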
def test_CSVLogger(self):
with self.test_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
with open(filepath) as csvfile:
output = ' '.join(csvfile.readlines())
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
with self.test_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(keras.layers.Dense(2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks, epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=temp_dir, histogram_freq=1, write_images=True,
write_grads=True, batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
def test_TensorBoard_histogram_freq_must_have_validation_data(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
with self.test_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# plain (inputs, targets) batches for this single-input model
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
inp = keras.Input((INPUT_DIM,))
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(
log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit w/o validation data should raise ValueError if histogram_freq > 0
with self.assertRaises(ValueError):
model.fit(x_train, y_train, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=1), epochs=3)
# fit generator without validation data should raise ValueError if
# histogram_freq > 0
with self.assertRaises(ValueError):
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=1))
# fit generator with validation data generator should raise ValueError if
# histogram_freq > 0
with self.assertRaises(ValueError):
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=data_generator(False),
validation_steps=1,
callbacks=callbacks_factory(histogram_freq=1))
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
with self.test_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
def test_LambdaCallback(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
def target():
while True:
pass
p = multiprocessing.Process(target=target)
p.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
p.join()
assert not p.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.test_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
keras.callbacks.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
if __name__ == '__main__':
test.main()
|
|
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from webob import exc
from neutron.common import constants
from neutron.db import extraroute_db
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_l3_plugin as test_l3
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class ExtraRouteTestExtensionManager(object):
def get_resources(self):
l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
extraroute.EXTENDED_ATTRIBUTES_2_0['routers'])
return l3.L3.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# This plugin class is for tests with plugin that integrates L3.
class TestExtraRouteIntPlugin(test_l3.TestL3NatIntPlugin,
extraroute_db.ExtraRoute_db_mixin):
supported_extension_aliases = ["external-net", "router", "extraroute"]
# A fake l3 service plugin class with extra route capability for
# plugins that delegate away L3 routing functionality
class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
extraroute_db.ExtraRoute_db_mixin):
supported_extension_aliases = ["router", "extraroute"]
class ExtraRouteDBTestCaseBase(object):
def _routes_update_prepare(self, router_id, subnet_id,
port_id, routes, skip_add=False):
if not skip_add:
self._router_interface_action('add', router_id, subnet_id, port_id)
self._update('routers', router_id, {'router': {'routes': routes}})
return self._show('routers', router_id)
def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
self._update('routers', router_id, {'router': {'routes': routes}})
self._router_interface_action('remove', router_id, subnet_id, port_id)
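# Note on the fixtures used throughout these tests: each extra route is a dict
# with a CIDR 'destination' and a 'nexthop' IP on one of the router's attached
# subnets (the helper above attaches the interface first), e.g.
#   {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}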
def test_route_update_with_one_route(self):
routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes)
self.assertEqual(body['router']['routes'], routes)
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_route_clear_routes_with_None(self):
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._routes_update_prepare(r['router']['id'],
None, p['port']['id'], routes)
body = self._update('routers', r['router']['id'],
{'router': {'routes': None}})
self.assertEqual(body['router']['routes'], [])
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_router_interface_in_use_by_route(self):
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes)
self.assertEqual(body['router']['routes'], routes)
self._router_interface_action(
'remove',
r['router']['id'],
None,
p['port']['id'],
expected_code=exc.HTTPConflict.code)
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_route_update_with_multi_routes(self):
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes))
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_routes_update_for_multiple_routers(self):
routes1 = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.0.3'}]
routes2 = [{'destination': '12.0.0.0/8',
'nexthop': '10.0.0.4'}]
with contextlib.nested(
self.router(),
self.router(),
self.subnet(cidr='10.0.0.0/24')) as (r1, r2, s):
with contextlib.nested(
self.port(subnet=s),
self.port(subnet=s)) as (p1, p2):
body = self._routes_update_prepare(r1['router']['id'],
None, p1['port']['id'],
routes1)
self.assertEqual(body['router']['routes'], routes1)
body = self._routes_update_prepare(r2['router']['id'],
None, p2['port']['id'],
routes2)
self.assertEqual(body['router']['routes'], routes2)
self._routes_update_cleanup(p1['port']['id'],
None, r1['router']['id'], [])
self._routes_update_cleanup(p2['port']['id'],
None, r2['router']['id'], [])
def test_router_update_delete_routes(self):
routes_orig = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
routes_left = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes_orig)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes_orig))
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes_left,
skip_add=True)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes_left))
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def _test_malformed_route(self, routes):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
self._update('routers', r['router']['id'],
{'router': {'routes': routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_no_destination_route(self):
self._test_malformed_route([{'nexthop': '10.0.1.6'}])
def test_no_nexthop_route(self):
self._test_malformed_route({'destination': '135.207.0.0/16'})
def test_none_destination(self):
self._test_malformed_route([{'destination': None,
'nexthop': '10.0.1.3'}])
def test_none_nexthop(self):
self._test_malformed_route([{'destination': '135.207.0.0/16',
'nexthop': None}])
def test_nexthop_is_port_ip(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
port_ip = p['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': port_ip}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_too_many_routes(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'},
{'destination': '192.168.0.0/16',
'nexthop': '10.0.1.6'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_dup_address(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_invalid_ip_address(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '512.207.0.0/16',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
routes = [{'destination': '127.207.0.0/48',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
routes = [{'destination': 'invalid_ip_address',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_invalid_nexthop_ip(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '127.207.0.0/16',
'nexthop': ' 300.10.10.4'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_nexthop_is_outside_port_subnet(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '127.207.0.0/16',
'nexthop': ' 20.10.10.4'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_on_external_port(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
self._set_net_external(s['subnet']['network_id'])
self._add_external_gateway_to_router(
r['router']['id'],
s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = body['router']['external_gateway_info']['network_id']
self.assertEqual(net_id, s['subnet']['network_id'])
port_res = self._list_ports(
'json',
200,
s['subnet']['network_id'],
tenant_id=r['router']['tenant_id'],
device_owner=constants.DEVICE_OWNER_ROUTER_GW)
port_list = self.deserialize('json', port_res)
self.assertEqual(len(port_list['ports']), 1)
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'}]
body = self._update('routers', r['router']['id'],
{'router': {'routes':
routes}})
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['routes'],
routes)
self._remove_external_gateway_from_router(
r['router']['id'],
s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
gw_info = body['router']['external_gateway_info']
self.assertIsNone(gw_info)
def test_router_list_with_sort(self):
with contextlib.nested(self.router(name='router1'),
self.router(name='router2'),
self.router(name='router3')
) as (router1, router2, router3):
self._test_list_with_sort('router', (router3, router2, router1),
[('name', 'desc')])
def test_router_list_with_pagination(self):
with contextlib.nested(self.router(name='router1'),
self.router(name='router2'),
self.router(name='router3')
) as (router1, router2, router3):
self._test_list_with_pagination('router',
(router1, router2, router3),
('name', 'asc'), 2, 2)
def test_router_list_with_pagination_reverse(self):
with contextlib.nested(self.router(name='router1'),
self.router(name='router2'),
self.router(name='router3')
) as (router1, router2, router3):
self._test_list_with_pagination_reverse('router',
(router1, router2,
router3),
('name', 'asc'), 2, 2)
class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase,
ExtraRouteDBTestCaseBase):
def setUp(self, plugin=None, ext_mgr=None):
if not plugin:
plugin = ('neutron.tests.unit.test_extension_extraroute.'
'TestExtraRouteIntPlugin')
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
cfg.CONF.set_default('max_routes', 3)
ext_mgr = ExtraRouteTestExtensionManager()
super(test_l3.L3BaseForIntTests, self).setUp(plugin=plugin,
ext_mgr=ext_mgr)
self.setup_notification_driver()
class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase,
ExtraRouteDBTestCaseBase):
def setUp(self):
# the plugin without L3 support
plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin'
# the L3 service plugin
l3_plugin = ('neutron.tests.unit.test_extension_extraroute.'
'TestExtraRouteL3NatServicePlugin')
service_plugins = {'l3_plugin_name': l3_plugin}
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
cfg.CONF.set_default('max_routes', 3)
ext_mgr = ExtraRouteTestExtensionManager()
super(test_l3.L3BaseForSepTests, self).setUp(
plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.setup_notification_driver()
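# Note: both setUp methods above cap max_routes at 3, which is why
# test_router_update_with_too_many_routes submits four routes and expects
# an HTTPBadRequest response.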
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import python libs
import os
import msgpack
import io
import struct
import shutil
# Import maras libs
from maras.index import (Index,
IndexException,
DocIdNotFound,
ElemNotFound,
TryReindexException,
IndexPreconditionsException)
from maras.storage import IU_Storage, DummyStorage
from maras.env import menv
if menv.get('rlock_obj'):
from maras import patch
patch.patch_cache_rr(menv['rlock_obj'])
from maras.rr_cache import cache1lvl
from maras.misc import random_hex_40
class IU_HashIndex(Index):
'''
This class is for internal use only; if you want to use a hash index,
subclass :py:class:`HashIndex` instead of this one.
This design keeps the main index logic in the database itself rather
than in custom user indexes.
'''
def __init__(
self,
db_path,
name,
entry_line_format='<40s{key}IIcI',
hash_lim=0xfffff,
storage_class=None,
key_format='c'):
'''
The index resolves hash conflicts by `separate chaining`.
:param db_path: database path
:type db_path: string
:param name: index name
:type name: ascii string
:param entry_line_format: entry line format; the `key_format` parameter value replaces `{key}` if present.
:type entry_line_format: string (40s{key}IIcI by default) {doc_id}{hash_key}{start}{size}{status}{next}
:param hash_lim: maximum number of hash function results (remember the birthday problem), counted from 0
:type hash_lim: integer
:param storage_class: storage class; by default the standard :py:class:`maras.storage.Storage` is opened (if given as a string it has to be accessible via globals()[storage_class])
:type storage_class: a maras.storage.Storage subclass, its name as a string, or None
:param key_format: the index key format
'''
if key_format and '{key}' in entry_line_format:
entry_line_format = entry_line_format.replace('{key}', key_format)
super(IU_HashIndex, self).__init__(db_path, name)
self.hash_lim = hash_lim
if not storage_class:
storage_class = IU_Storage
if storage_class and not isinstance(storage_class, basestring):
storage_class = storage_class.__name__
self.storage_class = storage_class
self.storage = None
self.bucket_line_format = '<I'
self.bucket_line_size = struct.calcsize(self.bucket_line_format)
self.entry_line_format = entry_line_format
self.entry_line_size = struct.calcsize(self.entry_line_format)
cache = cache1lvl(100)
self._find_key = cache(self._find_key)
self._locate_doc_id = cache(self._locate_doc_id)
self.bucket_struct = struct.Struct(self.bucket_line_format)
self.entry_struct = struct.Struct(self.entry_line_format)
self.data_start = (
self.hash_lim + 1) * self.bucket_line_size + self._start_ind + 2
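# Layout sketch derived from the defaults above (key_format 'c'): the entry
# format '<40scIIcI' packs, in order, a 40-byte doc_id, the 1-byte key, the
# start offset (I), the size (I), a 1-byte status flag ('o' on insert, 'd'
# once deleted) and the position of the next entry in the chain (I). Each
# bucket ('<I') stores the position of the first entry for its hash value,
# and data_start marks where entries begin after the (hash_lim + 1) bucket
# slots.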
def _fix_params(self):
super(IU_HashIndex, self)._fix_params()
self.bucket_line_size = struct.calcsize(self.bucket_line_format)
self.entry_line_size = struct.calcsize(self.entry_line_format)
self.bucket_struct = struct.Struct(self.bucket_line_format)
self.entry_struct = struct.Struct(self.entry_line_format)
self.data_start = (
self.hash_lim + 1) * self.bucket_line_size + self._start_ind + 2
def open_index(self):
if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
raise IndexException('Doesn\'t exist')
self.buckets = io.open(
os.path.join(self.db_path, self.name + '_buck'), 'r+b', buffering=0)
self._fix_params()
self._open_storage()
def create_index(self):
if os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
raise IndexException('Already exists')
with io.open(os.path.join(self.db_path, self.name + '_buck'), 'w+b') as f:
props = dict(name=self.name,
bucket_line_format=self.bucket_line_format,
entry_line_format=self.entry_line_format,
hash_lim=self.hash_lim,
version=self.__version__,
storage_class=self.storage_class)
f.write(msgpack.dumps(props))
self.buckets = io.open(
os.path.join(self.db_path, self.name + '_buck'), 'r+b', buffering=0)
self._create_storage()
def destroy(self):
super(IU_HashIndex, self).destroy()
self._clear_cache()
def _open_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.open()
def _create_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.create()
# def close_index(self):
# self.buckets.flush()
# self.buckets.close()
# self.storage.close()
# @lfu_cache(100)
def _find_key(self, key):
'''
Find the key position
:param key: the key to find
'''
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
if not location:
return None, None, 0, 0, 'u'
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, location)
if status == 'd': # when first record from many is deleted
while True:
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, _next)
if status != 'd':
break
return doc_id, l_key, start, size, status
else:
return None, None, 0, 0, 'u'
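# _find_key returns (doc_id, key, start, size, status); the sentinel
# (None, None, 0, 0, 'u') means no entry exists for the key.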
def _find_key_many(self, key, limit=1, offset=0):
location = None
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
while offset:
if not location:
break
try:
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, location)
except IndexException:
break
else:
if status != 'd':
if l_key == key: # in case of hash function conflicts
offset -= 1
location = _next
while limit:
if not location:
break
try:
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, location)
except IndexException:
break
else:
if status != 'd':
if l_key == key: # in case of hash function conflicts
yield doc_id, start, size, status
limit -= 1
location = _next
def _calculate_position(self, key):
return abs(hash(key) & self.hash_lim) * self.bucket_line_size + self._start_ind
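# Worked example (illustrative numbers): with the default hash_lim 0xfffff
# and bucket_line_size 4, a key whose masked hash is 42 maps to bucket
# offset 42 * 4 + self._start_ind; the bucket there holds the file position
# of the first entry in that key's chain.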
# TODO add cache!
def _locate_key(self, key, start):
"""
Locate the position of the key. It iterates using the `next` field of each
record until the required key is found.
:param key: the key to locate
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
try:
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
except struct.error:
raise ElemNotFound(
"Not found") # not found but might be also broken
if l_key == key:
break
else:
if not _next:
# not found
raise ElemNotFound("Not found")
else:
location = _next # go to next record
return location, doc_id, l_key, start, size, status, _next
# @lfu_cache(100)
def _locate_doc_id(self, doc_id, key, start):
"""
Locate the position of the doc_id. It iterates using the `next` field of each
record until the required doc_id is found.
:param doc_id: the doc_id to locate
:param key: key value
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
try:
l_doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
except:
raise DocIdNotFound(
"Doc_id '%s' for '%s' not found" % (doc_id, key))
if l_doc_id == doc_id and l_key == key: # added for consistency
break
else:
if not _next:
# not found
raise DocIdNotFound(
"Doc_id '%s' for '%s' not found" % (doc_id, key))
else:
location = _next # go to next record
return location, doc_id, l_key, start, size, status, _next
def _find_place(self, start):
"""
Find a place to put the key. It iterates using the `next` field of each
record until an empty `next` is found.
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
if not _next or status == 'd':
return self.buckets.tell() - self.entry_line_size, doc_id, l_key, start, size, status, _next
else:
location = _next # go to next record
def update(self, doc_id, key, u_start=0, u_size=0, u_status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# read the bucket to see whether this hash already points at an entry chain
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
raise ElemNotFound("Location '%s' not found" % doc_id)
found_at, _doc_id, _key, start, size, status, _next = self._locate_doc_id(doc_id, key, location)
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(doc_id,
key,
u_start,
u_size,
u_status,
_next))
self.flush()
self._find_key.delete(key)
self._locate_doc_id.delete(doc_id)
return True
def insert(self, doc_id, key, start, size, status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# conflict occurs?
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
location = 0
if location:
# last key with that hash
try:
found_at, _doc_id, _key, _start, _size, _status, _next = self._locate_doc_id(doc_id, key, location)
except DocIdNotFound:
found_at, _doc_id, _key, _start, _size, _status, _next = self._find_place(location)
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
status,
_next))
# self.flush()
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(_doc_id,
_key,
_start,
_size,
_status,
wrote_at))
else:
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
status,
_next))
self.flush()
self._locate_doc_id.delete(doc_id)
self._find_key.delete(_key)
# self._find_key.delete(key)
# self._locate_key.delete(_key)
return True
# raise NotImplementedError
else:
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
# check if position is bigger than all hash entries...
if wrote_at < self.data_start:
self.buckets.seek(self.data_start)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
status,
0))
# self.flush()
self._find_key.delete(key)
self.buckets.seek(start_position)
self.buckets.write(self.bucket_struct.pack(wrote_at))
self.flush()
return True
def get(self, key):
return self._find_key(self.make_key(key))
def get_many(self, key, limit=1, offset=0):
return self._find_key_many(self.make_key(key), limit, offset)
def all(self, limit=-1, offset=0):
self.buckets.seek(self.data_start)
while offset:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, key, start, size, status, _next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
offset -= 1
while limit:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, key, start, size, status, _next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
yield doc_id, key, start, size, status
limit -= 1
def _fix_link(self, key, pos_prev, pos_next):
# CHECKIT why I need that hack
if pos_prev >= self.data_start:
self.buckets.seek(pos_prev)
data = self.buckets.read(self.entry_line_size)
if data:
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
self.buckets.seek(pos_prev)
self.buckets.write(self.entry_struct.pack(doc_id,
l_key,
start,
size,
status,
pos_next))
self.flush()
if pos_next:
self.buckets.seek(pos_next)
data = self.buckets.read(self.entry_line_size)
if data:
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
self.buckets.seek(pos_next)
self.buckets.write(self.entry_struct.pack(doc_id,
l_key,
start,
size,
status,
_next))
self.flush()
return
def delete(self, doc_id, key, start=0, size=0):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
# this happens when trying to delete an element whose key belongs to an
# index that was added to the database without reindexing
raise TryReindexException()
found_at, _doc_id, _key, start, size, status, _next = self._locate_doc_id(doc_id, key, location)
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
'd',
_next))
self.flush()
# self._fix_link(_key, _prev, _next)
self._find_key.delete(key)
self._locate_doc_id.delete(doc_id)
return True
def compact(self, hash_lim=None):
if not hash_lim:
hash_lim = self.hash_lim
compact_ind = self.__class__(
self.db_path, self.name + '_compact', hash_lim=hash_lim)
compact_ind.create_index()
gen = self.all()
while True:
try:
doc_id, key, start, size, status = gen.next()
except StopIteration:
break
self.storage._f.seek(start)
value = self.storage._f.read(size)
start_ = compact_ind.storage._f.tell()
compact_ind.storage._f.write(value)
compact_ind.insert(doc_id, key, start_, size, status)
compact_ind.close_index()
original_name = self.name
# os.unlink(os.path.join(self.db_path, self.name + "_buck"))
self.close_index()
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_buck"), os.path.join(self.db_path, self.name + "_buck"))
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_stor"), os.path.join(self.db_path, self.name + "_stor"))
# self.name = original_name
self.open_index() # reload...
self.name = original_name
self._save_params(dict(name=original_name))
self._fix_params()
self._clear_cache()
return True
def make_key(self, key):
return key
def make_key_value(self, data):
return '1', data
def _clear_cache(self):
self._find_key.clear()
self._locate_doc_id.clear()
def close_index(self):
super(IU_HashIndex, self).close_index()
self._clear_cache()
class IU_UniqueHashIndex(IU_HashIndex):
"""
Index for *unique* keys! Designed to be an **id** index.
This class is for internal use only; if you want to use a unique hash index, subclass :py:class:`UniqueHashIndex` instead of this one.
This design keeps the main index logic in the database itself rather than in custom user indexes.
"""
def __init__(self, db_path, name, entry_line_format="<40s8sIIcI", *args, **kwargs):
if 'key' in kwargs:
raise IndexPreconditionsException(
"UniqueHashIndex doesn't accept key parameter'")
super(IU_UniqueHashIndex, self).__init__(db_path, name,
entry_line_format, *args, **kwargs)
self.create_key = random_hex_40  #: the function used to create a random key when no _id is given
# self.entry_struct=struct.Struct(entry_line_format)
# @lfu_cache(100)
def _find_key(self, key):
"""
Find the key position
:param key: the key to find
"""
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
found_at, l_key, rev, start, size, status, _next = self._locate_key(
key, location)
return l_key, rev, start, size, status
else:
return None, None, 0, 0, 'u'
def _find_key_many(self, *args, **kwargs):
raise NotImplementedError()
def _find_place(self, start, key):
"""
Find a place to put the key. It iterates using the `next` field of each
record until an empty `next` is found.
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
l_key, rev, start, size, status, _next = self.entry_struct.unpack(
data)
if l_key == key:
raise IndexException("The '%s' key already exists" % key)
if not _next or status == 'd':
return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next
else:
location = _next # go to next record
# @lfu_cache(100)
def _locate_key(self, key, start):
"""
Locate the position of the key. It iterates using the `next` field of each
record until the required key is found.
:param key: the key to locate
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
try:
l_key, rev, start, size, status, _next = self.entry_struct.unpack(data)
except struct.error:
raise ElemNotFound("Location '%s' not found" % key)
if l_key == key:
break
else:
if not _next:
# not found
raise ElemNotFound("Location '%s' not found" % key)
else:
location = _next # go to next record
return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next
def update(self, key, rev, u_start=0, u_size=0, u_status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# read the bucket to see whether this hash already points at an entry chain
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
raise ElemNotFound("Location '%s' not found" % key)
found_at, _key, _rev, start, size, status, _next = self._locate_key(
key, location)
if u_start == 0:
u_start = start
if u_size == 0:
u_size = size
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(key,
rev,
u_start,
u_size,
u_status,
_next))
self.flush()
self._find_key.delete(key)
return True
def insert(self, key, rev, start, size, status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# conflict occurs?
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
location = 0
if location:
# last key with that hash
found_at, _key, _rev, _start, _size, _status, _next = self._find_place(
location, key)
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
# check if position is bigger than all hash entries...
if wrote_at < self.data_start:
self.buckets.seek(self.data_start)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(key,
rev,
start,
size,
status,
_next))
# self.flush()
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(_key,
_rev,
_start,
_size,
_status,
wrote_at))
self.flush()
self._find_key.delete(_key)
# self._locate_key.delete(_key)
return True
# raise NotImplementedError
else:
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
# check if position is bigger than all hash entries...
if wrote_at < self.data_start:
self.buckets.seek(self.data_start)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(key,
rev,
start,
size,
status,
0))
# self.flush()
self.buckets.seek(start_position)
self.buckets.write(self.bucket_struct.pack(wrote_at))
self.flush()
self._find_key.delete(key)
return True
def all(self, limit=-1, offset=0):
self.buckets.seek(self.data_start)
while offset:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
offset -= 1
while limit:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
yield doc_id, rev, start, size, status
limit -= 1
def get_many(self, *args, **kwargs):
raise NotImplementedError()
def delete(self, key, start=0, size=0):
self.update(key, '00000000', start, size, 'd')
def make_key_value(self, data):
_id = data['_id']
try:
_id = bytes(data['_id'])
except:
raise IndexPreconditionsException(
"_id must be valid string/bytes object")
if len(_id) != 40:
raise IndexPreconditionsException('Invalid _id length: {0} for id: {1}'.format(len(_id), _id))
del data['_id']
del data['_rev']
return _id, data
def destroy(self):
Index.destroy(self)
self._clear_cache()
def _clear_cache(self):
self._find_key.clear()
def insert_with_storage(self, _id, _rev, value):
if value:
start, size = self.storage.insert(value)
else:
start = 1
size = 0
return self.insert(_id, _rev, start, size)
def update_with_storage(self, _id, _rev, value):
if value:
start, size = self.storage.insert(value)
else:
start = 1
size = 0
return self.update(_id, _rev, start, size)
class DummyHashIndex(IU_HashIndex):
def __init__(self, db_path, name, entry_line_format="<40s4sIIcI", *args, **kwargs):
super(DummyHashIndex, self).__init__(db_path, name,
entry_line_format, *args, **kwargs)
self.create_key = random_hex_40  #: the function used to create a random key when no _id is given
# self.entry_struct=struct.Struct(entry_line_format)
def update(self, *args, **kwargs):
return True
def insert(self, *args, **kwargs):
return True
def all(self, *args, **kwargs):
raise StopIteration
def get(self, *args, **kwargs):
raise ElemNotFound
def get_many(self, *args, **kwargs):
raise StopIteration
def delete(self, *args, **kwargs):
pass
def make_key_value(self, data):
return '1', {'_': 1}
def destroy(self):
pass
def _clear_cache(self):
pass
def _open_storage(self):
if not self.storage:
self.storage = DummyStorage()
self.storage.open()
def _create_storage(self):
if not self.storage:
self.storage = DummyStorage()
self.storage.create()
class IU_MultiHashIndex(IU_HashIndex):
"""
Class that allows indexing more than one key per database record.
It performs very well on GET/INSERT operations, but it is not optimized for
UPDATE operations (it always re-adds everything).
"""
def __init__(self, *args, **kwargs):
super(IU_MultiHashIndex, self).__init__(*args, **kwargs)
def insert(self, doc_id, key, start, size, status='o'):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
ins = super(IU_MultiHashIndex, self).insert
for curr_key in key:
ins(doc_id, curr_key, start, size, status)
return True
def update(self, doc_id, key, u_start, u_size, u_status='o'):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
upd = super(IU_MultiHashIndex, self).update
for curr_key in key:
upd(doc_id, curr_key, u_start, u_size, u_status)
def delete(self, doc_id, key, start=0, size=0):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
delete = super(IU_MultiHashIndex, self).delete
for curr_key in key:
delete(doc_id, curr_key, start, size)
def get(self, key):
return super(IU_MultiHashIndex, self).get(key)
def make_key_value(self, data):
raise NotImplementedError()
# classes for public use, done this way because static files with
# indexes are generated (the _index directory)
class HashIndex(IU_HashIndex):
"""
This class is designed to be used in custom indexes.
"""
pass
class UniqueHashIndex(IU_UniqueHashIndex):
"""
This class is designed to be used in custom indexes. It is meant to be the **id** index.
"""
pass
class MultiHashIndex(IU_MultiHashIndex):
"""
This class is designed to be used in custom indexes.
"""
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import requests
import tenacity
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
class HttpHook(BaseHook):
"""
Interact with HTTP servers.
:param http_conn_id: connection that has the base API url, e.g. https://www.google.com/,
and optional authentication credentials. Default headers can also be specified in
the Extra field in json format.
:type http_conn_id: str
:param method: the API method to be called
:type method: str
"""
def __init__(
self,
method='POST',
http_conn_id='http_default'
):
self.http_conn_id = http_conn_id
self.method = method.upper()
self.base_url = None
self._retry_obj = None
# headers may be passed through directly or in the "extra" field in the connection
# definition
def get_conn(self, headers=None):
"""
Returns http session for use with requests
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
"""
session = requests.Session()
if self.http_conn_id:
conn = self.get_connection(self.http_conn_id)
if conn.host and "://" in conn.host:
self.base_url = conn.host
else:
# schema defaults to HTTP
schema = conn.schema if conn.schema else "http"
host = conn.host if conn.host else ""
self.base_url = schema + "://" + host
if conn.port:
self.base_url = self.base_url + ":" + str(conn.port)
if conn.login:
session.auth = (conn.login, conn.password)
if conn.extra:
try:
session.headers.update(conn.extra_dejson)
except TypeError:
self.log.warning('Connection to %s has invalid extra field.', conn.host)
if headers:
session.headers.update(headers)
return session
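# For example (hypothetical connection): schema='https', host='example.com'
# (no '://' in the host) and port=8443 would yield
# base_url == 'https://example.com:8443'.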
def run(self, endpoint, data=None, headers=None, extra_options=None, **request_kwargs):
r"""
Performs the request
:param endpoint: the endpoint to be called, e.g. resource/v1/query?
:type endpoint: str
:param data: payload to be uploaded or request parameters
:type data: dict
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
:param extra_options: additional options to be used when executing the request,
e.g. {'check_response': False} to avoid raising exceptions on non-2XX
or 3XX status codes
:type extra_options: dict
:param \**request_kwargs: Additional kwargs to pass when creating a request.
For example, ``run(json=obj)`` is passed as ``requests.Request(json=obj)``
"""
extra_options = extra_options or {}
session = self.get_conn(headers)
if self.base_url and not self.base_url.endswith('/') and \
endpoint and not endpoint.startswith('/'):
url = self.base_url + '/' + endpoint
else:
url = (self.base_url or '') + (endpoint or '')
req = None
if self.method == 'GET':
# GET uses params
req = requests.Request(self.method,
url,
params=data,
headers=headers,
**request_kwargs)
elif self.method == 'HEAD':
# HEAD doesn't use params
req = requests.Request(self.method,
url,
headers=headers,
**request_kwargs)
else:
# Others use data
req = requests.Request(self.method,
url,
data=data,
headers=headers,
**request_kwargs)
prepped_request = session.prepare_request(req)
self.log.info("Sending '%s' to url: %s", self.method, url)
return self.run_and_check(session, prepped_request, extra_options)
def check_response(self, response):
"""
Checks the status code and raise an AirflowException exception on non 2XX or 3XX
status codes
:param response: A requests response object
:type response: requests.response
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
self.log.error("HTTP error: %s", response.reason)
self.log.error(response.text)
raise AirflowException(str(response.status_code) + ":" + response.reason)
def run_and_check(self, session, prepped_request, extra_options):
"""
Grabs extra options like timeout and actually runs the request,
checking for the result
:param session: the session to be used to execute the request
:type session: requests.Session
:param prepped_request: the prepared request generated in run()
:type prepped_request: session.prepare_request
:param extra_options: additional options to be used when executing the request,
e.g. {'check_response': False} to avoid raising exceptions on non-2XX
or 3XX status codes
:type extra_options: dict
"""
extra_options = extra_options or {}
try:
response = session.send(
prepped_request,
stream=extra_options.get("stream", False),
verify=extra_options.get("verify", True),
proxies=extra_options.get("proxies", {}),
cert=extra_options.get("cert"),
timeout=extra_options.get("timeout"),
allow_redirects=extra_options.get("allow_redirects", True))
if extra_options.get('check_response', True):
self.check_response(response)
return response
except requests.exceptions.ConnectionError as ex:
self.log.warning(str(ex) + ' Tenacity will retry the operation')
raise ex
def run_with_advanced_retry(self, _retry_args, *args, **kwargs):
"""
Runs Hook.run() with a Tenacity decorator attached to it. This is useful for
connectors which might be disturbed by intermittent issues and should not
instantly fail.
:param _retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
:type _retry_args: dict
.. code-block:: python
hook = HttpHook(http_conn_id='my_conn', method='GET')
retry_args = dict(
wait=tenacity.wait_exponential(),
stop=tenacity.stop_after_attempt(10),
retry=tenacity.retry_if_exception_type(requests.exceptions.ConnectionError)
)
hook.run_with_advanced_retry(
endpoint='v1/test',
_retry_args=retry_args
)
"""
self._retry_obj = tenacity.Retrying(
**_retry_args
)
return self._retry_obj(self.run, *args, **kwargs)
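# A minimal usage sketch (assumptions: an Airflow connection named
# 'http_default' exists and points at a reachable API; the endpoint and
# payload below are illustrative, not part of this module):
#
#     hook = HttpHook(method='POST', http_conn_id='http_default')
#     response = hook.run(endpoint='api/v1/items',
#                         data={'name': 'example'},
#                         extra_options={'timeout': 30})
#     print(response.text)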
|
|
# Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import sys
import warnings
from pymongo import read_preferences
from pymongo.auth import MECHANISMS
from pymongo.read_preferences import ReadPreference
from pymongo.errors import ConfigurationError
HAS_SSL = True
try:
import ssl
except ImportError:
HAS_SSL = False
# Jython 2.7 includes an incomplete ssl module. See PYTHON-498.
if sys.platform.startswith('java'):
HAS_SSL = False
def raise_config_error(key, dummy):
"""Raise ConfigurationError with the given key name."""
raise ConfigurationError("Unknown option %s" % (key,))
def validate_boolean(option, value):
"""Validates that 'value' is 'true' or 'false'.
"""
if isinstance(value, bool):
return value
elif isinstance(value, basestring):
if value not in ('true', 'false'):
raise ConfigurationError("The value of %s must be "
"'true' or 'false'" % (option,))
return value == 'true'
raise TypeError("Wrong type for %s, value must be a boolean" % (option,))
def validate_integer(option, value):
"""Validates that 'value' is an integer (or basestring representation).
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if not value.isdigit():
raise ConfigurationError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
"""Validate that 'value' is a positive integer.
"""
val = validate_integer(option, value)
if val < 0:
raise ConfigurationError("The value of %s must be "
"a positive integer" % (option,))
return val
def validate_readable(option, value):
"""Validates that 'value' is file-like and readable.
"""
# First make sure it's a string; in py3.3 open(True, 'r') succeeds
# Used in ssl cert checking due to poor ssl module error reporting
value = validate_basestring(option, value)
open(value, 'r').close()
return value
def validate_cert_reqs(option, value):
"""Validate the cert reqs are valid. It must be None or one of the three
values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``"""
if value is None:
return value
if HAS_SSL:
if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
return value
raise ConfigurationError("The value of %s must be one of: "
"`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
"`ssl.CERT_REQUIRED" % (option,))
else:
raise ConfigurationError("The value of %s is set but can't be "
"validated. The ssl module is not available"
% (option,))
def validate_positive_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or None.
"""
if value is None:
return value
return validate_positive_integer(option, value)
def validate_basestring(option, value):
"""Validates that 'value' is an instance of `basestring`.
"""
if isinstance(value, basestring):
return value
raise TypeError("Wrong type for %s, value must be an "
"instance of %s" % (option, basestring.__name__))
def validate_int_or_basestring(option, value):
"""Validates that 'value' is an integer or string.
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if value.isdigit():
return int(value)
return value
raise TypeError("Wrong type for %s, value must be an "
"integer or a string" % (option,))
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
err = ConfigurationError("%s must be a positive int or float" % (option,))
try:
value = float(value)
except (ValueError, TypeError):
raise err
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise err
return value
def validate_timeout_or_none(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds.
"""
if value is None:
return value
return validate_positive_float(option, value) / 1000.0
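# e.g. validate_timeout_or_none('connecttimeoutms', 2500) returns 2.5 (seconds),
# and validate_timeout_or_none('sockettimeoutms', None) returns None.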
def validate_read_preference(dummy, value):
"""Validate read preference for a ReplicaSetConnection.
"""
if value in read_preferences.modes:
return value
# Also allow string form of enum for uri_parser
try:
return read_preferences.mongos_enum(value)
except ValueError:
raise ConfigurationError("Not a valid read preference")
def validate_tag_sets(dummy, value):
"""Validate tag sets for a ReplicaSetConnection.
"""
if value is None:
return [{}]
if not isinstance(value, list):
raise ConfigurationError((
"Tag sets %s invalid, must be a list" ) % repr(value))
if len(value) == 0:
raise ConfigurationError((
"Tag sets %s invalid, must be None or contain at least one set of"
" tags") % repr(value))
for tags in value:
if not isinstance(tags, dict):
raise ConfigurationError(
"Tag set %s invalid, must be a dict" % repr(tags))
return value
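# e.g. validate_tag_sets(None, None) returns [{}], and
# validate_tag_sets(None, [{'dc': 'ny'}, {}]) returns the list unchanged;
# passing a non-list such as {'dc': 'ny'} raises ConfigurationError.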
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
if value not in MECHANISMS:
raise ConfigurationError("%s must be in "
"%s" % (option, MECHANISMS))
return value
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout
VALIDATORS = {
'replicaset': validate_basestring,
'slaveok': validate_boolean,
'slave_okay': validate_boolean,
'safe': validate_boolean,
'w': validate_int_or_basestring,
'wtimeout': validate_integer,
'wtimeoutms': validate_integer,
'fsync': validate_boolean,
'j': validate_boolean,
'journal': validate_boolean,
'connecttimeoutms': validate_timeout_or_none,
'sockettimeoutms': validate_timeout_or_none,
'ssl': validate_boolean,
'ssl_keyfile': validate_readable,
'ssl_certfile': validate_readable,
'ssl_cert_reqs': validate_cert_reqs,
'ssl_ca_certs': validate_readable,
'readpreference': validate_read_preference,
'read_preference': validate_read_preference,
'tag_sets': validate_tag_sets,
'secondaryacceptablelatencyms': validate_positive_float,
'secondary_acceptable_latency_ms': validate_positive_float,
'auto_start_request': validate_boolean,
'use_greenlets': validate_boolean,
'authmechanism': validate_auth_mechanism,
'authsource': validate_basestring,
}
def validate(option, value):
"""Generic validation function.
"""
lower = option.lower()
validator = VALIDATORS.get(lower, raise_config_error)
value = validator(option, value)
return lower, value
SAFE_OPTIONS = frozenset([
'w',
'wtimeout',
'wtimeoutms',
'fsync',
'j',
'journal'
])
class WriteConcern(dict):
def __init__(self, *args, **kwargs):
"""A subclass of dict that overrides __setitem__ to
validate write concern options.
"""
super(WriteConcern, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key not in SAFE_OPTIONS:
raise ConfigurationError("%s is not a valid write "
"concern option." % (key,))
key, value = validate(key, value)
super(WriteConcern, self).__setitem__(key, value)
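# A small illustrative sketch (values are arbitrary):
#
#     wc = WriteConcern()
#     wc['w'] = 2                # validated and stored as an int
#     wc['wtimeoutms'] = '500'   # validated; stored under 'wtimeoutms' as 500
#     wc['foo'] = 1              # raises ConfigurationError: not a safe option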
class BaseObject(object):
"""A base class that provides attributes and methods common
to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO 10GEN
"""
def __init__(self, **options):
self.__slave_okay = False
self.__read_pref = ReadPreference.PRIMARY
self.__tag_sets = [{}]
self.__secondary_acceptable_latency_ms = 15
self.__safe = None
self.__write_concern = WriteConcern()
self.__set_options(options)
if (self.__read_pref == ReadPreference.PRIMARY
and self.__tag_sets != [{}]
):
raise ConfigurationError(
"ReadPreference PRIMARY cannot be combined with tags")
# If safe hasn't been implicitly set by write concerns then set it.
if self.__safe is None:
if options.get("w") == 0:
self.__safe = False
else:
self.__safe = validate_boolean('safe', options.get("safe", True))
# Note: 'safe' is always passed by Connection and ReplicaSetConnection
# Always do the most "safe" thing, but warn about conflicts.
if self.__safe and options.get('w') == 0:
warnings.warn("Conflicting write concerns. 'w' set to 0 "
"but other options have enabled write concern. "
"Please set 'w' to a value other than 0.",
UserWarning)
def __set_safe_option(self, option, value):
"""Validates and sets getlasterror options for this
object (Connection, Database, Collection, etc.)
"""
if value is None:
self.__write_concern.pop(option, None)
else:
self.__write_concern[option] = value
if option != "w" or value != 0:
self.__safe = True
def __set_options(self, options):
"""Validates and sets all options passed to this object."""
for option, value in options.iteritems():
if option in ('slave_okay', 'slaveok'):
self.__slave_okay = validate_boolean(option, value)
elif option in ('read_preference', "readpreference"):
self.__read_pref = validate_read_preference(option, value)
elif option == 'tag_sets':
self.__tag_sets = validate_tag_sets(option, value)
elif option in (
'secondaryacceptablelatencyms',
'secondary_acceptable_latency_ms'
):
self.__secondary_acceptable_latency_ms = \
validate_positive_float(option, value)
elif option in SAFE_OPTIONS:
if option == 'journal':
self.__set_safe_option('j', value)
elif option == 'wtimeoutms':
self.__set_safe_option('wtimeout', value)
else:
self.__set_safe_option(option, value)
def __set_write_concern(self, value):
"""Property setter for write_concern."""
if not isinstance(value, dict):
raise ConfigurationError("write_concern must be an "
"instance of dict or a subclass.")
# Make a copy here to avoid users accidentally setting the
# same dict on multiple instances.
wc = WriteConcern()
for k, v in value.iteritems():
# Make sure we validate each option.
wc[k] = v
self.__write_concern = wc
def __get_write_concern(self):
"""The default write concern for this instance.
Supports dict style access for getting/setting write concern
options. Valid options include:
- `w`: (integer or string) If this is a replica set, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<int>` always includes the replica set
primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). **Setting w=0 disables write
acknowledgement and all other write concern options.**
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Ignored if the server is running without journaling.
- `fsync`: If ``True`` force the database to fsync all files before
returning. When used with `j` the server awaits the next group
commit before returning.
>>> m = pymongo.MongoClient()
>>> m.write_concern
{}
>>> m.write_concern = {'w': 2, 'wtimeout': 1000}
>>> m.write_concern
{'wtimeout': 1000, 'w': 2}
>>> m.write_concern['j'] = True
>>> m.write_concern
{'wtimeout': 1000, 'j': True, 'w': 2}
>>> m.write_concern = {'j': True}
>>> m.write_concern
{'j': True}
>>> # Disable write acknowledgement and write concern
...
>>> m.write_concern['w'] = 0
.. note:: Accessing :attr:`write_concern` returns its value
(a subclass of :class:`dict`), not a copy.
.. warning:: If you are using :class:`~pymongo.connection.Connection`
or :class:`~pymongo.replica_set_connection.ReplicaSetConnection`
make sure you explicitly set ``w`` to 1 (or a greater value) or
:attr:`safe` to ``True``. Unlike calling
:meth:`set_lasterror_options`, setting an option in
:attr:`write_concern` does not implicitly set :attr:`safe`
to ``True``.
"""
# To support dict style access we have to return the actual
# WriteConcern here, not a copy.
return self.__write_concern
write_concern = property(__get_write_concern, __set_write_concern)
def __get_slave_okay(self):
"""DEPRECATED. Use :attr:`read_preference` instead.
.. versionchanged:: 2.1
Deprecated slave_okay.
.. versionadded:: 2.0
"""
return self.__slave_okay
def __set_slave_okay(self, value):
"""Property setter for slave_okay"""
warnings.warn("slave_okay is deprecated. Please use "
"read_preference instead.", DeprecationWarning,
stacklevel=2)
self.__slave_okay = validate_boolean('slave_okay', value)
slave_okay = property(__get_slave_okay, __set_slave_okay)
def __get_read_pref(self):
"""The read preference mode for this instance.
See :class:`~pymongo.read_preferences.ReadPreference` for available options.
.. versionadded:: 2.1
"""
return self.__read_pref
def __set_read_pref(self, value):
"""Property setter for read_preference"""
self.__read_pref = validate_read_preference('read_preference', value)
read_preference = property(__get_read_pref, __set_read_pref)
def __get_acceptable_latency(self):
"""Any replica-set member whose ping time is within
secondary_acceptable_latency_ms of the nearest member may accept
reads. Defaults to 15 milliseconds.
See :class:`~pymongo.read_preferences.ReadPreference`.
.. versionadded:: 2.3
.. note:: ``secondary_acceptable_latency_ms`` is ignored when talking to a
replica set *through* a mongos. The equivalent is the localThreshold_ command
line option.
.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold
"""
return self.__secondary_acceptable_latency_ms
def __set_acceptable_latency(self, value):
"""Property setter for secondary_acceptable_latency_ms"""
self.__secondary_acceptable_latency_ms = (validate_positive_float(
'secondary_acceptable_latency_ms', value))
secondary_acceptable_latency_ms = property(
__get_acceptable_latency, __set_acceptable_latency)
def __get_tag_sets(self):
"""Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to
read only from members whose ``dc`` tag has the value ``"ny"``.
To specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." ReplicaSetConnection tries each set of tags in turn
until it finds a set of tags with at least one matching member.
.. seealso:: `Data-Center Awareness
<http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_
.. versionadded:: 2.3
"""
return self.__tag_sets
def __set_tag_sets(self, value):
"""Property setter for tag_sets"""
self.__tag_sets = validate_tag_sets('tag_sets', value)
tag_sets = property(__get_tag_sets, __set_tag_sets)
def __get_safe(self):
"""**DEPRECATED:** Use the 'w' :attr:`write_concern` option instead.
Use getlasterror with every write operation?
.. versionadded:: 2.0
"""
return self.__safe
def __set_safe(self, value):
"""Property setter for safe"""
warnings.warn("safe is deprecated. Please use the"
" 'w' write_concern option instead.",
DeprecationWarning, stacklevel=2)
self.__safe = validate_boolean('safe', value)
safe = property(__get_safe, __set_safe)
def get_lasterror_options(self):
"""DEPRECATED: Use :attr:`write_concern` instead.
Returns a dict of the getlasterror options set on this instance.
.. versionchanged:: 2.4
Deprecated get_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("get_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
return self.__write_concern.copy()
def set_lasterror_options(self, **kwargs):
"""DEPRECATED: Use :attr:`write_concern` instead.
Set getlasterror options for this instance.
Valid options include j=<bool>, w=<int/string>, wtimeout=<int>,
and fsync=<bool>. Implies safe=True.
:Parameters:
- `**kwargs`: Options should be passed as keyword
arguments (e.g. w=2, fsync=True)
.. versionchanged:: 2.4
Deprecated set_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("set_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
for key, value in kwargs.iteritems():
self.__set_safe_option(key, value)
def unset_lasterror_options(self, *options):
"""DEPRECATED: Use :attr:`write_concern` instead.
Unset getlasterror options for this instance.
If no options are passed unsets all getlasterror options.
This does not set `safe` to False.
:Parameters:
- `*options`: The list of options to unset.
.. versionchanged:: 2.4
Deprecated unset_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("unset_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
if len(options):
for option in options:
self.__write_concern.pop(option, None)
else:
self.__write_concern = WriteConcern()
def _get_wc_override(self):
"""Get write concern override.
Used in internal methods that **must** do acknowledged write ops.
We don't want to override user write concern options if write concern
is already enabled.
"""
if self.safe and self.__write_concern.get('w') != 0:
return {}
return {'w': 1}
def _get_write_mode(self, safe=None, **options):
"""Get the current write mode.
Determines if the current write is safe or not based on the
passed in or inherited safe value, write_concern values, or
passed options.
:Parameters:
- `safe`: check that the operation succeeded?
- `**options`: overriding write concern options.
.. versionadded:: 2.3
"""
# Don't ever send w=1 to the server.
def pop1(dct):
if dct.get('w') == 1:
dct.pop('w')
return dct
if safe is not None:
warnings.warn("The safe parameter is deprecated. Please use "
"write concern options instead.", DeprecationWarning,
stacklevel=3)
validate_boolean('safe', safe)
# Passed options override collection level defaults.
if safe is not None or options:
if safe or options:
if not options:
options = self.__write_concern.copy()
# Backwards compatibility edge case. Call getLastError
# with no options if safe=True was passed but collection
# level defaults have been disabled with w=0.
# These should be equivalent:
# Connection(w=0).foo.bar.insert({}, safe=True)
# MongoClient(w=0).foo.bar.insert({}, w=1)
if options.get('w') == 0:
return True, {}
# Passing w=0 overrides passing safe=True.
return options.get('w') != 0, pop1(options)
return False, {}
# Fall back to collection level defaults.
# w=0 takes precedence over self.safe = True
if self.__write_concern.get('w') == 0:
return False, {}
elif self.safe or self.__write_concern.get('w', 0) != 0:
return True, pop1(self.__write_concern.copy())
return False, {}
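# Illustrative outcomes of _get_write_mode() for a default instance
# (safe=True, empty write_concern): it returns (True, {}). With
# write_concern == {'w': 3, 'wtimeout': 1000} it returns
# (True, {'w': 3, 'wtimeout': 1000}); with write_concern == {'w': 0}
# it returns (False, {}).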
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from openstack_dashboard.api import base
from openstack_dashboard.api import fwaas
from openstack_dashboard.api import lbaas
from openstack_dashboard.api import neutron
from openstack_dashboard.api import vpn
from openstack_dashboard.test.test_data import utils
def data(TEST):
# data returned by openstack_dashboard.api.neutron wrapper
TEST.agents = utils.TestDataContainer()
TEST.networks = utils.TestDataContainer()
TEST.subnets = utils.TestDataContainer()
TEST.ports = utils.TestDataContainer()
TEST.routers = utils.TestDataContainer()
TEST.routers_with_rules = utils.TestDataContainer()
TEST.q_floating_ips = utils.TestDataContainer()
TEST.q_secgroups = utils.TestDataContainer()
TEST.q_secgroup_rules = utils.TestDataContainer()
TEST.providers = utils.TestDataContainer()
TEST.pools = utils.TestDataContainer()
TEST.vips = utils.TestDataContainer()
TEST.members = utils.TestDataContainer()
TEST.monitors = utils.TestDataContainer()
TEST.neutron_quotas = utils.TestDataContainer()
TEST.net_profiles = utils.TestDataContainer()
TEST.policy_profiles = utils.TestDataContainer()
TEST.network_profile_binding = utils.TestDataContainer()
TEST.policy_profile_binding = utils.TestDataContainer()
TEST.vpnservices = utils.TestDataContainer()
TEST.ikepolicies = utils.TestDataContainer()
TEST.ipsecpolicies = utils.TestDataContainer()
TEST.ipsecsiteconnections = utils.TestDataContainer()
TEST.firewalls = utils.TestDataContainer()
TEST.fw_policies = utils.TestDataContainer()
TEST.fw_rules = utils.TestDataContainer()
    # data returned by neutronclient
TEST.api_agents = utils.TestDataContainer()
TEST.api_networks = utils.TestDataContainer()
TEST.api_subnets = utils.TestDataContainer()
TEST.api_ports = utils.TestDataContainer()
TEST.api_routers = utils.TestDataContainer()
TEST.api_q_floating_ips = utils.TestDataContainer()
TEST.api_q_secgroups = utils.TestDataContainer()
TEST.api_q_secgroup_rules = utils.TestDataContainer()
TEST.api_pools = utils.TestDataContainer()
TEST.api_vips = utils.TestDataContainer()
TEST.api_members = utils.TestDataContainer()
TEST.api_monitors = utils.TestDataContainer()
TEST.api_extensions = utils.TestDataContainer()
TEST.api_net_profiles = utils.TestDataContainer()
TEST.api_policy_profiles = utils.TestDataContainer()
TEST.api_network_profile_binding = utils.TestDataContainer()
TEST.api_policy_profile_binding = utils.TestDataContainer()
TEST.api_vpnservices = utils.TestDataContainer()
TEST.api_ikepolicies = utils.TestDataContainer()
TEST.api_ipsecpolicies = utils.TestDataContainer()
TEST.api_ipsecsiteconnections = utils.TestDataContainer()
TEST.api_firewalls = utils.TestDataContainer()
TEST.api_fw_policies = utils.TestDataContainer()
TEST.api_fw_rules = utils.TestDataContainer()
#------------------------------------------------------------
# 1st network
network_dict = {'admin_state_up': True,
'id': '82288d84-e0a5-42ac-95be-e6af08727e42',
'name': 'net1',
'status': 'ACTIVE',
'subnets': ['e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'],
'tenant_id': '1',
'router:external': False,
'shared': False}
subnet_dict = {'allocation_pools': [{'end': '10.0.0.254',
'start': '10.0.0.2'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': '10.0.0.0/24',
'enable_dhcp': True,
'gateway_ip': '10.0.0.1',
'id': network_dict['subnets'][0],
'ip_version': 4,
'name': 'mysubnet1',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# network profile for network when using the cisco n1k plugin
net_profile_dict = {'name': 'net_profile_test1',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '3000-31000',
'id':
'00000000-1111-1111-1111-000000000000',
'tenant_id': network_dict['tenant_id']}
TEST.api_net_profiles.add(net_profile_dict)
TEST.net_profiles.add(neutron.Profile(net_profile_dict))
# policy profile for port when using the cisco n1k plugin
policy_profile_dict = {'name': 'policy_profile_test1',
'id':
'00000000-9999-9999-9999-000000000000'}
TEST.api_policy_profiles.add(policy_profile_dict)
TEST.policy_profiles.add(neutron.Profile(policy_profile_dict))
# network profile binding
network_profile_binding_dict = {'profile_id':
'00000000-1111-1111-1111-000000000000',
'tenant_id': network_dict['tenant_id']}
TEST.api_network_profile_binding.add(network_profile_binding_dict)
TEST.network_profile_binding.add(neutron.Profile(
network_profile_binding_dict))
# policy profile binding
policy_profile_binding_dict = {'profile_id':
'00000000-9999-9999-9999-000000000000',
'tenant_id': network_dict['tenant_id']}
TEST.api_policy_profile_binding.add(policy_profile_binding_dict)
TEST.policy_profile_binding.add(neutron.Profile(
policy_profile_binding_dict))
# ports on 1st network
port_dict = {'admin_state_up': True,
'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890',
'device_owner': 'network:dhcp',
'fixed_ips': [{'ip_address': '10.0.0.3',
'subnet_id': subnet_dict['id']}],
'id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
'mac_address': 'fa:16:3e:9c:d5:7e',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id']}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
port_dict = {'admin_state_up': True,
'device_id': '1',
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': '10.0.0.4',
'subnet_id': subnet_dict['id']}],
'id': '7e6ce62c-7ea2-44f8-b6b4-769af90a8406',
'mac_address': 'fa:16:3e:9d:e6:2f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id']}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
assoc_port = port_dict
#------------------------------------------------------------
# 2nd network
network_dict = {'admin_state_up': True,
'id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2',
'name': 'net2',
'status': 'ACTIVE',
'subnets': ['3f7c5d79-ee55-47b0-9213-8e669fb03009'],
'tenant_id': '2',
'router:external': False,
'shared': True}
subnet_dict = {'allocation_pools': [{'end': '172.16.88.254',
'start': '172.16.88.2'}],
'dns_nameservers': ['10.56.1.20', '10.56.1.21'],
'host_routes': [{'destination': '192.168.20.0/24',
'nexthop': '172.16.88.253'},
{'destination': '192.168.21.0/24',
'nexthop': '172.16.88.252'}],
'cidr': '172.16.88.0/24',
'enable_dhcp': True,
'gateway_ip': '172.16.88.1',
'id': '3f7c5d79-ee55-47b0-9213-8e669fb03009',
'ip_version': 4,
'name': 'aaaa',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
port_dict = {'admin_state_up': True,
'device_id': '2',
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': '172.16.88.3',
'subnet_id': subnet_dict['id']}],
'id': '1db2cc37-3553-43fa-b7e2-3fc4eb4f9905',
'mac_address': 'fa:16:3e:56:e6:2f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id']}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
#------------------------------------------------------------
# external network
network_dict = {'admin_state_up': True,
'id': '9b466b94-213a-4cda-badf-72c102a874da',
'name': 'ext_net',
'status': 'ACTIVE',
'subnets': ['d6bdc71c-7566-4d32-b3ff-36441ce746e8'],
'tenant_id': '3',
'router:external': True,
'shared': False}
    subnet_dict = {'allocation_pools': [{'start': '172.24.4.226',
'end': '172.24.4.238'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': '172.24.4.0/28',
'enable_dhcp': False,
'gateway_ip': '172.24.4.225',
'id': 'd6bdc71c-7566-4d32-b3ff-36441ce746e8',
'ip_version': 4,
'name': 'ext_subnet',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
ext_net = network_dict
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
#------------------------------------------------------------
# Set up router data
port_dict = {'admin_state_up': True,
'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
'device_owner': 'network:router_gateway',
'fixed_ips': [{'ip_address': '10.0.0.3',
'subnet_id': subnet_dict['id']}],
'id': '44ec6726-4bdc-48c5-94d4-df8d1fbf613b',
'mac_address': 'fa:16:3e:9c:d5:7e',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': '1'}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
router_dict = {'id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
'name': 'router1',
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1'}
TEST.api_routers.add(router_dict)
TEST.routers.add(neutron.Router(router_dict))
router_dict = {'id': '10e3dc42-1ce1-4d48-87cf-7fc333055d6c',
'name': 'router2',
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1'}
TEST.api_routers.add(router_dict)
TEST.routers.add(neutron.Router(router_dict))
router_dict = {'id': '71fb25e9-cd9f-4a44-a780-85ec3bd8bdd7',
'name': 'rulerouter',
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1',
'router_rules': [{'id': '101',
'action': 'deny',
'source': 'any',
'destination': 'any',
'nexthops': []},
{'id': '102',
'action': 'permit',
'source': 'any',
'destination': '8.8.8.8/32',
'nexthops': ['1.0.0.2', '1.0.0.1']}]}
TEST.api_routers.add(router_dict)
TEST.routers_with_rules.add(neutron.Router(router_dict))
#------------------------------------------------------------
# floating IP
# unassociated
fip_dict = {'tenant_id': '1',
'floating_ip_address': '172.16.88.227',
'floating_network_id': ext_net['id'],
'id': '9012cd70-cfae-4e46-b71e-6a409e9e0063',
'fixed_ip_address': None,
'port_id': None,
'router_id': None}
TEST.api_q_floating_ips.add(fip_dict)
TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))
# associated (with compute port on 1st network)
fip_dict = {'tenant_id': '1',
'floating_ip_address': '172.16.88.228',
'floating_network_id': ext_net['id'],
'id': 'a97af8f2-3149-4b97-abbd-e49ad19510f7',
'fixed_ip_address': assoc_port['fixed_ips'][0]['ip_address'],
'port_id': assoc_port['id'],
'router_id': router_dict['id']}
TEST.api_q_floating_ips.add(fip_dict)
TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))
#------------------------------------------------------------
# security group
sec_group_1 = {'tenant_id': '1',
'description': 'default',
'id': 'faad7c80-3b62-4440-967c-13808c37131d',
'name': 'default'}
sec_group_2 = {'tenant_id': '1',
'description': 'NotDefault',
'id': '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d',
'name': 'other_group'}
sec_group_3 = {'tenant_id': '1',
'description': 'NotDefault',
'id': '443a4d7a-4bd2-4474-9a77-02b35c9f8c95',
'name': 'another_group'}
def add_rule_to_group(secgroup, default_only=True):
rule_egress_ipv4 = {
'id': str(uuid.uuid4()),
'direction': u'egress', 'ethertype': u'IPv4',
'port_range_min': None, 'port_range_max': None,
'protocol': None, 'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id': secgroup['id'],
'tenant_id': secgroup['tenant_id']}
rule_egress_ipv6 = {
'id': str(uuid.uuid4()),
'direction': u'egress', 'ethertype': u'IPv6',
'port_range_min': None, 'port_range_max': None,
'protocol': None, 'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id': secgroup['id'],
'tenant_id': secgroup['tenant_id']}
rule_tcp_80 = {
'id': str(uuid.uuid4()),
'direction': u'ingress', 'ethertype': u'IPv4',
'port_range_min': 80, 'port_range_max': 80,
'protocol': u'tcp', 'remote_group_id': None,
'remote_ip_prefix': u'0.0.0.0/0',
'security_group_id': secgroup['id'],
'tenant_id': secgroup['tenant_id']}
rule_icmp = {
'id': str(uuid.uuid4()),
'direction': u'ingress', 'ethertype': u'IPv4',
'port_range_min': 5, 'port_range_max': 8,
'protocol': u'icmp', 'remote_group_id': None,
'remote_ip_prefix': u'0.0.0.0/0',
'security_group_id': secgroup['id'],
'tenant_id': secgroup['tenant_id']}
rule_group = {
'id': str(uuid.uuid4()),
'direction': u'ingress', 'ethertype': u'IPv4',
'port_range_min': 80, 'port_range_max': 80,
'protocol': u'tcp', 'remote_group_id': sec_group_1['id'],
'remote_ip_prefix': None,
'security_group_id': secgroup['id'],
'tenant_id': secgroup['tenant_id']}
rule_all_tcp = {
'id': str(uuid.uuid4()),
'direction': u'egress', 'ethertype': u'IPv4',
'port_range_min': 1, 'port_range_max': 65535,
'protocol': u'tcp', 'remote_group_id': None,
'remote_ip_prefix': u'0.0.0.0/24',
'security_group_id': secgroup['id'],
'tenant_id': secgroup['tenant_id']}
rules = []
if not default_only:
rules += [rule_tcp_80, rule_icmp, rule_group, rule_all_tcp]
rules += [rule_egress_ipv4, rule_egress_ipv6]
secgroup['security_group_rules'] = rules
add_rule_to_group(sec_group_1, default_only=False)
add_rule_to_group(sec_group_2)
add_rule_to_group(sec_group_3)
groups = [sec_group_1, sec_group_2, sec_group_3]
sg_name_dict = dict([(sg['id'], sg['name']) for sg in groups])
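    # e.g. sg_name_dict == {'faad7c80-3b62-4440-967c-13808c37131d': 'default',
    #                       '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d': 'other_group',
    #                       '443a4d7a-4bd2-4474-9a77-02b35c9f8c95': 'another_group'}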
for sg in groups:
# Neutron API
TEST.api_q_secgroups.add(sg)
for rule in sg['security_group_rules']:
TEST.api_q_secgroup_rules.add(copy.copy(rule))
        # OpenStack Dashboard internal API
TEST.q_secgroups.add(
neutron.SecurityGroup(copy.deepcopy(sg), sg_name_dict))
for rule in sg['security_group_rules']:
TEST.q_secgroup_rules.add(
neutron.SecurityGroupRule(copy.copy(rule), sg_name_dict))
#------------------------------------------------------------
# LBaaS
# 1st pool
pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
'tenant_id': '1',
'vip_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'pool1',
'description': 'pool description',
'subnet_id': TEST.subnets.first().id,
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'health_monitors': ['d4a0500f-db2b-4cc4-afcf-ec026febff96'],
'admin_state_up': True,
'status': 'ACTIVE',
'provider': 'haproxy'}
TEST.api_pools.add(pool_dict)
TEST.pools.add(lbaas.Pool(pool_dict))
# 1st vip
vip_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'vip1',
'address': '10.0.0.100',
'floatip_address': '',
'other_address': '10.0.0.100',
'description': 'vip description',
'subnet_id': TEST.subnets.first().id,
'subnet': TEST.subnets.first().cidr,
'protocol_port': 80,
'protocol': pool_dict['protocol'],
'pool_id': pool_dict['id'],
'session_persistence': {'type': 'APP_COOKIE',
'cookie_name': 'jssessionid'},
'connection_limit': 10,
'admin_state_up': True}
TEST.api_vips.add(vip_dict)
TEST.vips.add(lbaas.Vip(vip_dict))
# 2nd vip
vip_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
'name': 'vip2',
'address': '10.0.0.110',
'floatip_address': '',
'other_address': '10.0.0.110',
'description': 'vip description',
'subnet_id': TEST.subnets.first().id,
'subnet': TEST.subnets.first().cidr,
'protocol_port': 80,
'protocol': pool_dict['protocol'],
'pool_id': pool_dict['id'],
'session_persistence': {'type': 'APP_COOKIE',
'cookie_name': 'jssessionid'},
'connection_limit': 10,
'admin_state_up': True}
TEST.api_vips.add(vip_dict)
TEST.vips.add(lbaas.Vip(vip_dict))
# 1st member
member_dict = {'id': '78a46e5e-eb1a-418a-88c7-0e3f5968b08',
'tenant_id': '1',
'pool_id': pool_dict['id'],
'address': '10.0.0.11',
'protocol_port': 80,
'weight': 10,
'status': 'ACTIVE',
'admin_state_up': True}
TEST.api_members.add(member_dict)
TEST.members.add(lbaas.Member(member_dict))
# 2nd member
member_dict = {'id': '41ac1f8d-6d9c-49a4-a1bf-41955e651f91',
'tenant_id': '1',
'pool_id': pool_dict['id'],
'address': '10.0.0.12',
'protocol_port': 80,
'weight': 10,
'status': 'ACTIVE',
'admin_state_up': True}
TEST.api_members.add(member_dict)
TEST.members.add(lbaas.Member(member_dict))
# 2nd pool
pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d50',
'tenant_id': '1',
'vip_id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
'name': 'pool2',
'description': 'pool description',
'subnet_id': TEST.subnets.first().id,
'protocol': 'HTTPS',
'lb_method': 'ROUND_ROBIN',
'health_monitors': ['d4a0500f-db2b-4cc4-afcf-ec026febff97'],
'status': 'PENDING_CREATE',
'admin_state_up': True}
TEST.api_pools.add(pool_dict)
TEST.pools.add(lbaas.Pool(pool_dict))
# 1st monitor
monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff96',
'type': 'ping',
'delay': 10,
'timeout': 10,
'max_retries': 10,
'http_method': 'GET',
'url_path': '/',
'expected_codes': '200',
'admin_state_up': True}
TEST.api_monitors.add(monitor_dict)
TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))
# 2nd monitor
monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff97',
'type': 'ping',
'delay': 10,
'timeout': 10,
'max_retries': 10,
'http_method': 'GET',
'url_path': '/',
'expected_codes': '200',
'admin_state_up': True}
TEST.api_monitors.add(monitor_dict)
TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))
#------------------------------------------------------------
# Quotas
quota_data = {'network': '10',
'subnet': '10',
'port': '50',
'router': '10',
'floatingip': '50',
'security_group': '20',
'security_group_rule': '100',
}
TEST.neutron_quotas.add(base.QuotaSet(quota_data))
#------------------------------------------------------------
# Extensions
extension_1 = {"name": "security-group",
"alias": "security-group",
"description": "The security groups extension."}
extension_2 = {"name": "Quota management support",
"alias": "quotas",
"description": "Expose functions for quotas management"}
TEST.api_extensions.add(extension_1)
TEST.api_extensions.add(extension_2)
#------------------------------------------------------------
# 1st agent
agent_dict = {"binary": "neutron-openvswitch-agent",
"description": None,
"admin_state_up": True,
"heartbeat_timestamp": "2013-07-26 06:51:47",
"alive": True,
"id": "c876ff05-f440-443e-808c-1d34cda3e88a",
"topic": "N/A",
"host": "devstack001",
"agent_type": "Open vSwitch agent",
"started_at": "2013-07-26 05:23:28",
"created_at": "2013-07-26 05:23:28",
"configurations": {"devices": 2}}
TEST.api_agents.add(agent_dict)
TEST.agents.add(neutron.Agent(agent_dict))
# 2nd agent
agent_dict = {"binary": "neutron-dhcp-agent",
"description": None,
"admin_state_up": True,
"heartbeat_timestamp": "2013-07-26 06:51:48",
"alive": True,
"id": "f0d12e3d-1973-41a2-b977-b95693f9a8aa",
"topic": "dhcp_agent",
"host": "devstack001",
"agent_type": "DHCP agent",
"started_at": "2013-07-26 05:23:30",
"created_at": "2013-07-26 05:23:30",
"configurations": {
"subnets": 1,
"use_namespaces": True,
"dhcp_lease_duration": 120,
"dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
"networks": 1,
"ports": 1}}
TEST.api_agents.add(agent_dict)
TEST.agents.add(neutron.Agent(agent_dict))
#------------------------------------------------------------
# Service providers
provider_1 = {"service_type": "LOADBALANCER",
"name": "haproxy",
"default": True}
TEST.providers.add(provider_1)
#------------------------------------------------------------
# VPNaaS
# 1st VPNService
vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d61',
'tenant_id': '1',
'name': 'cloud_vpn1',
'description': 'vpn description',
'subnet_id': TEST.subnets.first().id,
'router_id': TEST.routers.first().id,
'vpn_type': 'ipsec',
'ipsecsiteconnections': [],
'admin_state_up': True,
'status': 'Active'}
TEST.api_vpnservices.add(vpnservice_dict)
TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))
# 2nd VPNService
vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d62',
'tenant_id': '1',
'name': 'cloud_vpn2',
'description': 'vpn description',
'subnet_id': TEST.subnets.first().id,
'router_id': TEST.routers.first().id,
'vpn_type': 'ipsec',
'ipsecsiteconnections': [],
'admin_state_up': True,
'status': 'Active'}
TEST.api_vpnservices.add(vpnservice_dict)
TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))
# 1st IKEPolicy
ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c981',
'tenant_id': '1',
'name': 'ikepolicy_1',
'description': 'ikepolicy description',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-256',
'ike_version': 'v1',
'lifetime': {'units': 'seconds', 'value': 3600},
'phase1_negotiation_mode': 'main',
'pfs': 'group5'}
TEST.api_ikepolicies.add(ikepolicy_dict)
TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))
# 2nd IKEPolicy
ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c982',
'tenant_id': '1',
'name': 'ikepolicy_2',
'description': 'ikepolicy description',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-256',
'ike_version': 'v1',
'lifetime': {'units': 'seconds', 'value': 3600},
'phase1_negotiation_mode': 'main',
'pfs': 'group5'}
TEST.api_ikepolicies.add(ikepolicy_dict)
TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))
# 1st IPSecPolicy
ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb8',
'tenant_id': '1',
'name': 'ipsecpolicy_1',
'description': 'ipsecpolicy description',
'auth_algorithm': 'sha1',
'encapsulation_mode': 'tunnel',
'encryption_algorithm': '3des',
'lifetime': {'units': 'seconds', 'value': 3600},
'pfs': 'group5',
'transform_protocol': 'esp'}
TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))
# 2nd IPSecPolicy
ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb9',
'tenant_id': '1',
'name': 'ipsecpolicy_2',
'description': 'ipsecpolicy description',
'auth_algorithm': 'sha1',
'encapsulation_mode': 'tunnel',
'encryption_algorithm': '3des',
'lifetime': {'units': 'seconds', 'value': 3600},
'pfs': 'group5',
'transform_protocol': 'esp'}
TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))
# 1st IPSecSiteConnection
ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d6',
'tenant_id': '1',
'name': 'ipsec_connection_1',
'description': 'vpn connection description',
'dpd': {'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': ikepolicy_dict['id'],
'initiator': 'bi-directional',
'ipsecpolicy_id': ipsecpolicy_dict['id'],
'mtu': 1500,
'peer_address':
'2607:f0d0:4545:3:200:f8ff:fe21:67cf',
'peer_cidrs': ['20.1.0.0/24', '21.1.0.0/24'],
'peer_id': '2607:f0d0:4545:3:200:f8ff:fe21:67cf',
'psk': 'secret',
'vpnservice_id': vpnservice_dict['id'],
'admin_state_up': True,
'status': 'Active'}
TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
TEST.ipsecsiteconnections.add(
vpn.IPSecSiteConnection(ipsecsiteconnection_dict))
# 2nd IPSecSiteConnection
ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d7',
'tenant_id': '1',
'name': 'ipsec_connection_2',
'description': 'vpn connection description',
'dpd': {'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': ikepolicy_dict['id'],
'initiator': 'bi-directional',
'ipsecpolicy_id': ipsecpolicy_dict['id'],
'mtu': 1500,
'peer_address': '172.0.0.2',
'peer_cidrs': ['20.1.0.0/24'],
'peer_id': '172.0.0.2',
'psk': 'secret',
'vpnservice_id': vpnservice_dict['id'],
'admin_state_up': True,
'status': 'Active'}
TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
TEST.ipsecsiteconnections.add(
vpn.IPSecSiteConnection(ipsecsiteconnection_dict))
# FWaaS
# 1st rule (used by 1st policy)
rule1_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
'tenant_id': '1',
'name': 'rule1',
'description': 'rule1 description',
'protocol': 'tcp',
'action': 'allow',
'source_ip_address': '1.2.3.0/24',
'source_port': '80',
'destination_ip_address': '4.5.6.7/32',
'destination_port': '1:65535',
'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'position': 1,
'shared': True,
'enabled': True}
TEST.api_fw_rules.add(rule1_dict)
rule1 = fwaas.Rule(copy.deepcopy(rule1_dict))
# NOTE: rule1['policy'] is set below
TEST.fw_rules.add(rule1)
# 2nd rule (used by 2nd policy; no name)
rule2_dict = {'id': 'c6298a93-850f-4f64-b78a-959fd4f1e5df',
'tenant_id': '1',
'name': '',
'description': '',
'protocol': 'udp',
'action': 'deny',
'source_ip_address': '1.2.3.0/24',
'source_port': '80',
'destination_ip_address': '4.5.6.7/32',
'destination_port': '1:65535',
'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'position': 2,
'shared': True,
'enabled': True}
TEST.api_fw_rules.add(rule2_dict)
rule2 = fwaas.Rule(copy.deepcopy(rule2_dict))
# NOTE: rule2['policy'] is set below
TEST.fw_rules.add(rule2)
# 3rd rule (not used by any policy)
rule3_dict = {'id': 'h0881d38-c3eb-4fee-9763-12de3338041d',
'tenant_id': '1',
'name': 'rule3',
'description': 'rule3 description',
'protocol': 'icmp',
'action': 'allow',
'source_ip_address': '1.2.3.0/24',
'source_port': '80',
'destination_ip_address': '4.5.6.7/32',
'destination_port': '1:65535',
'firewall_policy_id': None,
'position': None,
'shared': True,
'enabled': True}
TEST.api_fw_rules.add(rule3_dict)
rule3 = fwaas.Rule(copy.deepcopy(rule3_dict))
    # rule3 is not associated with any policy
rule3._apidict['policy'] = None
TEST.fw_rules.add(rule3)
# 1st policy (associated with 2 rules)
policy1_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'tenant_id': '1',
'name': 'policy1',
'description': 'policy with two rules',
'firewall_rules': [rule1_dict['id'], rule2_dict['id']],
'audited': True,
'shared': True}
TEST.api_fw_policies.add(policy1_dict)
policy1 = fwaas.Policy(copy.deepcopy(policy1_dict))
policy1._apidict['rules'] = [rule1, rule2]
TEST.fw_policies.add(policy1)
# Reverse relations (rule -> policy)
rule1._apidict['policy'] = policy1
rule2._apidict['policy'] = policy1
# 2nd policy (associated with no rules; no name)
policy2_dict = {'id': 'cf50b331-787a-4623-825e-da794c918d6a',
'tenant_id': '1',
'name': '',
'description': '',
'firewall_rules': [],
'audited': False,
'shared': False}
TEST.api_fw_policies.add(policy2_dict)
policy2 = fwaas.Policy(copy.deepcopy(policy2_dict))
policy2._apidict['rules'] = []
TEST.fw_policies.add(policy2)
# 1st firewall
fw1_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
'tenant_id': '1',
'firewall_policy_id':
'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'firewall1',
'description': 'firewall description',
'status': 'PENDING_CREATE',
'shared': True,
'admin_state_up': True}
TEST.api_firewalls.add(fw1_dict)
fw1 = fwaas.Firewall(copy.deepcopy(fw1_dict))
fw1._apidict['policy'] = policy1
TEST.firewalls.add(fw1)
# 2nd firewall (no name)
fw2_dict = {'id': '1aa75150-415f-458e-bae5-5a362a4fb1f7',
'tenant_id': '1',
'firewall_policy_id':
'abcdef-c3eb-4fee-9763-12de3338041e',
'name': '',
'description': '',
'status': 'PENDING_CREATE',
'shared': True,
'admin_state_up': True}
    TEST.api_firewalls.add(fw2_dict)
fw2 = fwaas.Firewall(copy.deepcopy(fw2_dict))
fw2._apidict['policy'] = policy1
    TEST.firewalls.add(fw2)
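# Note (illustrative, not part of the original fixture): tests consuming this
# data typically pull individual records back out of the containers, e.g.
#     TEST.networks.first()       # first neutron.Network added above
#     TEST.subnets.first().id     # the pattern already used for the LBaaS data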
|
|
'''
Library containing pulse shapes.
'''
import numpy as np
from pycqed.measurement.waveform_control.pulse import Pulse, apply_modulation
from pycqed.utilities.general import int_to_bin
class MW_IQmod_pulse(Pulse):
'''
Block pulse on the I channel modulated with IQ modulation.
kwargs:
amplitude (V)
length (s)
mod_frequency (Hz)
phase (deg)
phaselock (bool)
I_env is a block pulse
transformation:
[I_mod] = [cos(wt+phi) 0] [I_env]
[Q_mod] [-sin(wt+phi) 0] [0]
'''
def __init__(self, name, I_channel, Q_channel, **kw):
super().__init__(name)
self.I_channel = I_channel
self.Q_channel = Q_channel
self.channels = [I_channel, Q_channel]
self.mod_frequency = kw.pop('mod_frequency', 1e6)
self.amplitude = kw.pop('amplitude', 0.1)
self.length = kw.pop('length', 1e-6)
self.phase = kw.pop('phase', 0.)
self.phaselock = kw.pop('phaselock', True)
self.alpha = kw.pop('alpha', 1)
self.phi_skew = kw.pop('phi_skew', 0)
def __call__(self, **kw):
self.mod_frequency = kw.pop('mod_frequency', self.mod_frequency)
self.amplitude = kw.pop('amplitude', self.amplitude)
self.length = kw.pop('length', self.length)
self.phase = kw.pop('phase', self.phase)
self.phaselock = kw.pop('phaselock', self.phaselock)
self.alpha = kw.pop('alpha', self.alpha)
self.phi_skew = kw.pop('phi_skew', self.phi_skew)
return self
def chan_wf(self, chan, tvals):
idx0 = np.where(tvals >= tvals[0])[0][0]
idx1 = np.where(tvals <= tvals[0] + self.length)[0][-1] + 1
wf = np.zeros(len(tvals))
if not self.phaselock:
tvals = tvals.copy() - tvals[idx0]
I_mod, Q_mod = apply_modulation(
self.amplitude*np.ones(len(tvals)),
np.zeros(len(tvals)), tvals[idx0:idx1],
mod_frequency=self.mod_frequency, phase=self.phase,
phi_skew=self.phi_skew, alpha=self.alpha)
if chan == self.I_channel:
wf[idx0:idx1] += I_mod
elif chan == self.Q_channel:
wf[idx0:idx1] += Q_mod
return wf
class SSB_DRAG_pulse(Pulse):
'''
Gauss pulse on the I channel, derivative of Gauss on the Q channel.
modulated with Single Sideband (SSB) modulation.
Required arguments:
name (str) : base name of the pulse
        I_channel (str) : name of the channel on which to act (as defined in pulsar)
Q_channel (str) : " "
kwargs:
amplitude (V)
sigma (s)
nr_sigma (int) (default=4)
motzoi ( ) (default=0)
mod_frequency (Hz)
phase (deg)
phaselock (bool)
alpha (arb. units): QI amplitude
phi_skew (deg) : phase skewness
I_env is a gaussian
Q_env is the derivative of a gaussian
    The envelope transformation is:
Signal = predistortion * modulation * envelope
See Leo's notes on mixer predistortion in the docs for details
[I_mod] = [1 tan(phi-skew)] [cos(wt+phi) sin(wt+phi)] [I_env]
[Q_mod] [0 sec(phi-skew)/alpha] [-sin(wt+phi) cos(wt+phi)] [Q_env]
The predistortion * modulation matrix is implemented in a single step using
the following matrix
M*mod = [cos(x)-tan(phi-skew)sin(x) sin(x)+tan(phi-skew)cos(x) ]
[-sin(x)sec(phi-skew)/alpha cos(x)sec(phi-skew)/alpha]
where: x = wt+phi
Reduces to a Gaussian pulse if motzoi == 0
Reduces to an unmodulated pulse if mod_frequency == 0
'''
def __init__(self, name, I_channel, Q_channel, **kw):
super().__init__(name)
self.I_channel = I_channel
self.Q_channel = Q_channel
self.channels = [I_channel, Q_channel]
self.amplitude = kw.pop('amplitude', 0.1)
self.sigma = kw.pop('sigma', 0.25e-6)
self.nr_sigma = kw.pop('nr_sigma', 4)
self.motzoi = kw.pop('motzoi', 0)
self.mod_frequency = kw.pop('mod_frequency', 1e6)
self.phase = kw.pop('phase', 0.)
self.phaselock = kw.pop('phaselock', True)
self.alpha = kw.pop('alpha', 1) # QI amp ratio
self.phi_skew = kw.pop('phi_skew', 0) # IQ phase skewness
self.length = self.sigma * self.nr_sigma
def __call__(self, **kw):
self.amplitude = kw.pop('amplitude', self.amplitude)
self.sigma = kw.pop('sigma', self.sigma)
self.nr_sigma = kw.pop('nr_sigma', self.nr_sigma)
self.motzoi = kw.pop('motzoi', self.motzoi)
self.mod_frequency = kw.pop('mod_frequency', self.mod_frequency)
self.phase = kw.pop('phase', self.phase)
self.phaselock = kw.pop('phaselock', self.phaselock)
self.length = self.sigma * self.nr_sigma
return self
def chan_wf(self, chan, tvals):
idx0 = np.where(tvals >= tvals[0])[0][0]
idx1 = np.where(tvals <= tvals[0] + self.length)[0][-1] + 1
wf = np.zeros(len(tvals))
t = tvals - tvals[0] # Gauss envelope should not be displaced
mu = self.length/2.0
if not self.phaselock:
tvals = tvals.copy() - tvals[idx0]
gauss_env = self.amplitude*np.exp(-(0.5 * ((t-mu)**2) / self.sigma**2))
deriv_gauss_env = self.motzoi * -1 * (t-mu)/(self.sigma**1) * gauss_env
        # subtract offsets
gauss_env -= (gauss_env[0]+gauss_env[-1])/2.
deriv_gauss_env -= (deriv_gauss_env[0]+deriv_gauss_env[-1])/2.
# Note prefactor is multiplied by self.sigma to normalize
if chan == self.I_channel:
I_mod, Q_mod = apply_modulation(gauss_env, deriv_gauss_env,
tvals[idx0:idx1],
mod_frequency=self.mod_frequency,
phase=self.phase,
phi_skew=self.phi_skew,
alpha=self.alpha)
wf[idx0:idx1] += I_mod
if chan == self.Q_channel:
I_mod, Q_mod = apply_modulation(gauss_env, deriv_gauss_env,
tvals[idx0:idx1],
mod_frequency=self.mod_frequency,
phase=self.phase,
phi_skew=self.phi_skew,
alpha=self.alpha)
wf[idx0:idx1] += Q_mod
return wf
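# Hedged usage sketch (the channel names and parameter values below are assumed
# examples, not taken from this module):
#     drag = SSB_DRAG_pulse('X180', I_channel='ch1', Q_channel='ch2',
#                           amplitude=0.5, sigma=5e-9, nr_sigma=4,
#                           motzoi=0.2, mod_frequency=50e6)
#     tvals = np.arange(0, drag.length, 1e-9)
#     wf_I = drag.chan_wf('ch1', tvals)
#     wf_Q = drag.chan_wf('ch2', tvals)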
class Mux_DRAG_pulse(SSB_DRAG_pulse):
'''
Uses 4 AWG channels to play a multiplexer compatible SSB DRAG pulse
uses channels GI and GQ (default 1 and 2) for the SSB-modulated gaussian
and uses channels DI and DQ (default 3 and 4) for the modulated derivative
components.
'''
def __init__(self, name, GI_channel='ch1', GQ_channel='ch2',
DI_channel='ch3', DQ_channel='ch4', **kw):
# Ideally I'd use grandparent inheritance here but I couldn't get it
# to work
self.name = name
self.start_offset = 0
self.stop_offset = 0
self._t0 = None
self._clock = None
self.GI_channel = GI_channel
self.GQ_channel = GQ_channel
self.DI_channel = DI_channel
self.DQ_channel = DQ_channel
self.channels = [GI_channel, GQ_channel, DI_channel, DQ_channel]
self.amplitude = kw.pop('amplitude', 0.1)
self.sigma = kw.pop('sigma', 0.25e-6)
self.nr_sigma = kw.pop('nr_sigma', 4)
self.motzoi = kw.pop('motzoi', 1)
self.mod_frequency = kw.pop('mod_frequency', 1e6)
self.phase = kw.pop('phase', 0.)
self.phaselock = kw.pop('phaselock', True)
# skewness parameters
self.G_alpha = kw.pop('G_alpha', 1) # QI amp ratio of Gauss
self.G_phi_skew = kw.pop('G_phi_skew', 0) # IQ phase skewness of Gauss
self.D_alpha = kw.pop('D_alpha', 1) # QI amp ratio of deriv
self.D_phi_skew = kw.pop('D_phi_skew', 0) # IQ phase skewness of deriv
self.length = self.sigma * self.nr_sigma
def __call__(self, **kw):
self.GI_channel = kw.pop('GI_channel', self.GI_channel)
self.GQ_channel = kw.pop('GQ_channel', self.GQ_channel)
self.DI_channel = kw.pop('DI_channel', self.DI_channel)
self.DQ_channel = kw.pop('DQ_channel', self.DQ_channel)
self.channels = [self.GI_channel, self.GQ_channel,
self.DI_channel, self.DQ_channel]
self.amplitude = kw.pop('amplitude', self.amplitude)
self.sigma = kw.pop('sigma', self.sigma)
self.nr_sigma = kw.pop('nr_sigma', self.nr_sigma)
self.mod_frequency = kw.pop('mod_frequency', self.mod_frequency)
self.phase = kw.pop('phase', self.phase)
self.phaselock = kw.pop('phaselock', self.phaselock)
# skewness parameters
self.G_alpha = kw.pop('G_alpha', self.G_alpha) # QI amp ratio
self.G_phi_skew = kw.pop(
'G_phi_skew', self.G_phi_skew) # IQ phase skewness
self.D_alpha = kw.pop('D_alpha', self.D_alpha)
self.D_phi_skew = kw.pop('D_phi_skew', self.D_phi_skew)
self.length = self.sigma * self.nr_sigma
return self
def chan_wf(self, chan, tvals):
idx0 = np.where(tvals >= tvals[0])[0][0]
idx1 = np.where(tvals <= tvals[0] + self.length)[0][-1] + 1
wf = np.zeros(len(tvals))
t = tvals - tvals[0] # Gauss envelope should not be displaced
mu = self.length/2.0
if not self.phaselock:
tvals = tvals.copy() - tvals[idx0]
gauss_env = self.amplitude*np.exp(-(0.5 * ((t-mu)**2) / self.sigma**2))
if chan in [self.GI_channel, self.GQ_channel]:
gauss_env -= (gauss_env[0]+gauss_env[-1])/2.
I_mod, Q_mod = apply_modulation(gauss_env,
np.zeros(len(tvals)),
tvals[idx0:idx1],
mod_frequency=self.mod_frequency,
phase=self.phase,
phi_skew=self.G_phi_skew,
alpha=self.G_alpha)
if chan == self.GI_channel:
wf[idx0:idx1] += I_mod
else:
wf[idx0:idx1] += Q_mod
elif chan in [self.DI_channel, self.DQ_channel]:
der_env = self.motzoi * -1 * (t-mu)/(self.sigma**1) * gauss_env
der_env -= (der_env[0]+der_env[-1])/2.
I_mod, Q_mod = apply_modulation(np.zeros(len(tvals)), der_env,
tvals[idx0:idx1],
mod_frequency=self.mod_frequency,
phase=self.phase,
phi_skew=self.D_phi_skew,
alpha=self.D_alpha)
if chan == self.DI_channel:
wf[idx0:idx1] += I_mod
else:
wf[idx0:idx1] += Q_mod
return wf
# Some simple pulse definitions.
class SquareFluxPulse(Pulse):
def __init__(self, channel=None, channels=None, name='square flux pulse',
**kw):
Pulse.__init__(self, name)
if channel is None and channels is None:
raise ValueError('Must specify either channel or channels')
elif channels is None:
self.channel = channel # this is just for convenience, internally
# this is the part the sequencer element wants to communicate with
self.channels.append(channel)
else:
self.channels = channels
self.amplitude = kw.pop('amplitude', 0)
self.square_pulse_length = kw.pop('square_pulse_length', 0)
self.pulse_buffer = kw.pop('pulse_buffer', 0)
self.length = self.square_pulse_length + self.pulse_buffer
self.kernel_path = kw.get('kernel_path', None)
if self.kernel_path is not None:
kernelvec = np.loadtxt(self.kernel_path)
self.kernel = np.zeros((len(kernelvec), len(kernelvec)))
for i in range(len(kernelvec)):
for j in range(len(kernelvec)):
self.kernel[i, j] = kernelvec[i-j]
del(kernelvec)
else:
            raise ValueError('Must specify kernel path')
def __call__(self, **kw):
self.amplitude = kw.pop('amplitude', self.amplitude)
self.square_pulse_length = kw.pop('square_pulse_length',
self.square_pulse_length)
self.pulse_buffer = kw.pop('pulse_buffer',
self.pulse_buffer)
self.length = self.square_pulse_length + self.pulse_buffer
self.channel = kw.pop('channel', self.channel)
self.channels = kw.pop('channels', self.channels)
self.channels.append(self.channel)
return self
def chan_wf(self, chan, tvals):
sq_pulse = np.ones(
int(round((self.square_pulse_length)*1e9))) * self.amplitude
buff_pulse = np.zeros(int(
round((self.length-self.square_pulse_length)*1e9)))
return np.concatenate([sq_pulse, buff_pulse])
class MartinisFluxPulse(Pulse):
pass
class QWG_Codeword(Pulse):
def __init__(self,
channel_map=None,
cw_trigger_channel='ch1_marker_1',
cw_channels=['ch1_marker_2', 'ch1_marker_2',
'ch2_marker_1'],
codeword=0, length=20e-9,
name='QWG codeword', **kw):
Pulse.__init__(self, name)
self.amplitude = kw.pop('amplitude', 0)
self.length = length
self.cw_channels = cw_channels
self.cw_trigger_channel = cw_trigger_channel
self.set_codeword(codeword)
self.channels = self.cw_channels + [self.cw_trigger_channel]
# def __call__(self, **kw):
# self.amplitude = kw.pop('amplitude', self.amplitude)
# self.length = kw.pop('length', self.length)
# return self
def set_codeword(self, codeword):
self.cw_high_channels = []
self.cw_low_channels = []
bin_cw = int_to_bin(codeword, w=len(self.cw_channels), lsb_last=False)
for i, chan in enumerate(self.cw_channels):
if bin_cw[i] == '1':
self.cw_high_channels.append(chan)
else:
self.cw_low_channels.append(chan)
self.codeword = codeword
def chan_wf(self, chan, tvals):
if chan in self.cw_high_channels:
return np.ones(len(tvals)) * self.amplitude
if chan in self.cw_low_channels:
return np.zeros(len(tvals))
if chan == self.cw_trigger_channel:
amps = np.zeros(len(tvals))
amps[len(tvals)//2:] = 1
return amps
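if __name__ == '__main__':
    # Minimal standalone sketch (parameter values are assumed examples): build
    # the Gaussian and derivative-of-Gaussian envelopes that SSB_DRAG_pulse
    # computes in chan_wf, using only numpy.
    sigma = 5e-9
    nr_sigma = 4
    amplitude = 0.5
    motzoi = 0.2
    length = sigma * nr_sigma
    tvals = np.arange(0, length, 1e-9)
    mu = length / 2.0
    gauss_env = amplitude * np.exp(-(0.5 * ((tvals - mu) ** 2) / sigma ** 2))
    deriv_gauss_env = motzoi * -1 * (tvals - mu) / sigma * gauss_env
    # Offset subtraction, mirroring SSB_DRAG_pulse.chan_wf
    gauss_env -= (gauss_env[0] + gauss_env[-1]) / 2.
    deriv_gauss_env -= (deriv_gauss_env[0] + deriv_gauss_env[-1]) / 2.
    print('I envelope peak:', gauss_env.max())
    print('Q envelope peak:', abs(deriv_gauss_env).max())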
|
|
"""
Support for installing and building the "wheel" binary package format.
"""
from __future__ import with_statement
import compileall
import csv
import functools
import hashlib
import os
import re
import shutil
import sys
from base64 import urlsafe_b64encode
from pip.backwardcompat import ConfigParser, StringIO
from pip.exceptions import InvalidWheelFilename
from pip.locations import distutils_scheme
from pip.log import logger
from pip import pep425tags
from pip.util import call_subprocess, normalize_path, make_path_relative
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
wheel_ext = '.whl'
def rehash(path, algo='sha256', blocksize=1<<20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
block = f.read(blocksize)
while block:
length += len(block)
h.update(block)
block = f.read(blocksize)
digest = 'sha256='+urlsafe_b64encode(h.digest()).decode('latin1').rstrip('=')
return (digest, length)
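# Illustrative call (the digest and length values here are made up):
#     rehash('/path/to/installed/script.py')
#     -> ('sha256=hAbCdEf0...', 1523)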
try:
unicode
def binary(s):
if isinstance(s, unicode):
return s.encode('ascii')
return s
except NameError:
def binary(s):
if isinstance(s, str):
return s.encode('ascii')
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = { 'newline': '' }
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
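    # e.g. a script whose first line is b'#!python' gets rewritten so that it
    # starts with b'#!' + sys.executable; the concrete interpreter path is
    # whatever Python pip is running under.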
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
script = open(path, 'rb')
try:
firstline = script.readline()
if not firstline.startswith(binary('#!python')):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = binary('#!') + exename + binary(os.linesep)
rest = script.read()
finally:
script.close()
script = open(path, 'wb')
try:
script.write(firstline)
script.write(rest)
finally:
script.close()
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
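    # An entry_points.txt typically looks like (illustrative contents):
    #     [console_scripts]
    #     pip = pip:main
    #     [gui_scripts]
    #     some-gui = somepkg.gui:run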
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = ConfigParser.RawConfigParser()
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True):
"""Install a wheel"""
scheme = distutils_scheme(name, user=user, home=home, root=root)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
compileall.compile_dir(source, force=True, quiet=True)
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
if not os.path.exists(dest): # common for the 'include' path
os.makedirs(dest)
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base
and s.endswith('.dist-info')
# is self.req.project_name case preserving?
and s.lower().startswith(req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
if not os.path.exists(destsubdir):
os.makedirs(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
shutil.move(srcfile, destfile)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
    # To add to the level of hack in this section of code: in order to support
    # ensurepip, this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which controls which versioned scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [k for k in console
if re.match(r'easy_install(-\d\.\d)?$', k)]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(maker.make_multiple(['%s = %s' % kv for kv in console.items()]))
if len(gui) > 0:
generated.extend(maker.make_multiple(['%s = %s' % kv for kv in gui.items()], {'gui': True}))
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.util import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base+'.pyc')
yield path
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename("%s is not a valid wheel filename." % filename)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set((x, y, z) for x in self.pyversions for y
in self.abis for z in self.plats)
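        # e.g. (illustrative filename) 'pip-1.5-py2.py3-none-any.whl' parses to
        # name='pip', version='1.5', pyversions=['py2', 'py3'],
        # abis=['none'], plats=['any'].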
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, wheel_dir, build_options=[], global_options=[]):
self.requirement_set = requirement_set
self.finder = finder
self.wheel_dir = normalize_path(wheel_dir)
self.build_options = build_options
self.global_options = global_options
def _build_one(self, req):
"""Build one wheel."""
base_args = [
sys.executable, '-c',
"import setuptools;__file__=%r;"\
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % req.setup_py] + \
list(self.global_options)
logger.notify('Running setup.py bdist_wheel for %s' % req.name)
logger.notify('Destination directory: %s' % self.wheel_dir)
wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] + self.build_options
try:
call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed building wheel for %s' % req.name)
return False
def build(self):
"""Build wheels."""
        # unpack and construct the requirement set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
#make the wheelhouse
if not os.path.exists(self.wheel_dir):
os.makedirs(self.wheel_dir)
#build the wheels
logger.notify('Building wheels for collected packages: %s' % ', '.join([req.name for req in reqset]))
logger.indent += 2
build_success, build_failure = [], []
for req in reqset:
if req.is_wheel:
logger.notify("Skipping building wheel: %s", req.url)
continue
if self._build_one(req):
build_success.append(req)
else:
build_failure.append(req)
logger.indent -= 2
        # notify success/failure
if build_success:
logger.notify('Successfully built %s' % ' '.join([req.name for req in build_success]))
if build_failure:
logger.notify('Failed to build %s' % ' '.join([req.name for req in build_failure]))
|
|
import os
import textwrap
import unittest
from parameterized import parameterized
from conans import __version__ as client_version
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient
from conans.tools import save
from conans.util.files import load
class NewCommandTest(unittest.TestCase):
def test_template(self):
client = TestClient()
template1 = textwrap.dedent("""
class {{package_name}}Conan(ConanFile):
name = "{{name}}"
version = "{{version}}"
conan_version = "{{conan_version}}"
""")
save(os.path.join(client.cache_folder, "templates/mytemplate.py"), template1)
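        # The template file is rendered with Jinja-style placeholders; the
        # assertions below expect {{package_name}}, {{name}}, {{version}} and
        # {{conan_version}} to have been substituted.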
client.run("new hello/0.1 --template=mytemplate.py")
conanfile = client.load("conanfile.py")
self.assertIn("class HelloConan(ConanFile):", conanfile)
self.assertIn('name = "hello"', conanfile)
self.assertIn('version = "0.1"', conanfile)
self.assertIn('conan_version = "{}"'.format(client_version), conanfile)
def test_template_custom_definitions(self):
client = TestClient()
template1 = textwrap.dedent("""
class {{package_name}}Conan(ConanFile):
name = "{{name}}"
version = "{{version}}"
conan_version = "{{conan_version}}"
license = "{{license}}"
homepage = "{{homepage}}"
""")
save(os.path.join(client.cache_folder, "templates/mytemplate.py"), template1)
client.run("new hello/0.1 --template=mytemplate.py "
"-d license=MIT -d homepage=http://example.com")
conanfile = client.load("conanfile.py")
self.assertIn("class HelloConan(ConanFile):", conanfile)
self.assertIn('name = "hello"', conanfile)
self.assertIn('version = "0.1"', conanfile)
self.assertIn('conan_version = "{}"'.format(client_version), conanfile)
self.assertIn('license = "MIT"', conanfile)
self.assertIn('homepage = "http://example.com"', conanfile)
def test_template_dir(self):
client = TestClient()
template_dir = "templates/command/new/t_dir"
template_recipe = textwrap.dedent("""
class {{package_name}}Conan(ConanFile):
name = "{{name}}"
version = "{{version}}"
conan_version = "{{conan_version}}"
""")
save(os.path.join(client.cache_folder, template_dir + "/conanfile.py"), template_recipe)
template_txt = textwrap.dedent("""
package_name={{package_name}}
name={{name}}
version={{version}}
conan_version={{conan_version}}
""")
save(os.path.join(client.cache_folder, template_dir + "/{{name}}/hello.txt"), template_txt)
client.run("new hello/0.1 --template=t_dir")
conanfile = client.load("conanfile.py")
self.assertIn("class HelloConan(ConanFile):", conanfile)
self.assertIn('name = "hello"', conanfile)
self.assertIn('version = "0.1"', conanfile)
self.assertIn('conan_version = "{}"'.format(client_version), conanfile)
hellotxt = client.load("hello/hello.txt")
self.assertIn("package_name=Hello", hellotxt)
self.assertIn("name=hello", hellotxt)
self.assertIn('version=0.1', hellotxt)
self.assertIn("conan_version={}".format(client_version), hellotxt)
def test_template_test_package(self):
client = TestClient()
template2 = textwrap.dedent("""
class {{package_name}}Conan(ConanFile):
version = "fixed"
""")
save(os.path.join(client.cache_folder, "templates", "subfolder", "mytemplate.py"),
template2)
client.run("new hello/0.1 -m=subfolder/mytemplate.py")
conanfile = client.load("conanfile.py")
self.assertIn("class HelloConan(ConanFile):", conanfile)
self.assertIn('version = "fixed"', conanfile)
def test_template_abs_path_test_package(self):
client = TestClient()
template2 = textwrap.dedent("""
class {{package_name}}Conan(ConanFile):
version = "fixed"
""")
tmp = temp_folder()
full_path = os.path.join(tmp, "templates", "subfolder", "mytemplate.py")
save(full_path, template2)
client.run('new hello/0.1 --template="%s"' % full_path)
conanfile = client.load("conanfile.py")
self.assertIn("class HelloConan(ConanFile):", conanfile)
self.assertIn('version = "fixed"', conanfile)
def test_template_errors(self):
client = TestClient()
client.run("new hello/0.1 -m=mytemplate.py", assert_error=True)
self.assertIn("ERROR: Template doesn't exist", client.out)
client.run("new hello/0.1 -m=mytemplate", assert_error=True)
self.assertIn("ERROR: Template doesn't exist", client.out)
client.run("new hello/0.1 --template=mytemplate.py --bare", assert_error=True)
self.assertIn("ERROR: 'template' is incompatible", client.out)
client.run("new hello/0.1 --template", assert_error=True)
self.assertIn("ERROR: Exiting with code: 2", client.out)
def test_new(self):
client = TestClient()
client.run('new MyPackage/1.3@myuser/testing -t')
root = client.current_folder
self.assertTrue(os.path.exists(os.path.join(root, "conanfile.py")))
content = load(os.path.join(root, "conanfile.py"))
self.assertIn('name = "MyPackage"', content)
self.assertIn('version = "1.3"', content)
self.assertTrue(os.path.exists(os.path.join(root, "test_package/conanfile.py")))
self.assertTrue(os.path.exists(os.path.join(root, "test_package/CMakeLists.txt")))
self.assertTrue(os.path.exists(os.path.join(root, "test_package/example.cpp")))
# assert they are correct at least
client.run("export . myuser/testing")
client.run("search")
self.assertIn("MyPackage/1.3@myuser/testing", client.out)
def test_new_error(self):
""" packages with short name
"""
client = TestClient()
client.run('new A/1.3@myuser/testing', assert_error=True)
self.assertIn("ERROR: Value provided for package name, 'A' (type str), is too short. Valid "
"names must contain at least 2 characters.", client.out)
client.run('new A2/1.3@myuser/u', assert_error=True)
self.assertIn("ERROR: Value provided for channel, 'u' (type str), is too short. Valid "
"names must contain at least 2 characters.", client.out)
@parameterized.expand([("My-Package", "MyPackage"),
("my-package", "MyPackage"),
("my_package", "MyPackage"),
("my.Package", "MyPackage"),
("my+package", "MyPackage")])
def test_naming(self, package_name, python_class_name):
""" packages with dash
"""
client = TestClient()
client.run('new {}/1.3@myuser/testing -t'.format(package_name))
root = client.current_folder
self.assertTrue(os.path.exists(os.path.join(root, "conanfile.py")))
content = load(os.path.join(root, "conanfile.py"))
self.assertIn('class {}Conan(ConanFile):'.format(python_class_name), content)
self.assertIn('name = "{}"'.format(package_name), content)
self.assertIn('version = "1.3"', content)
self.assertTrue(os.path.exists(os.path.join(root, "test_package/conanfile.py")))
self.assertTrue(os.path.exists(os.path.join(root, "test_package/CMakeLists.txt")))
self.assertTrue(os.path.exists(os.path.join(root, "test_package/example.cpp")))
# assert they are correct at least
client.run("export . myuser/testing")
client.run("search")
self.assertIn("{}/1.3@myuser/testing".format(package_name), client.out)
def test_new_header(self):
client = TestClient()
client.run('new MyPackage/1.3 -t -i')
root = client.current_folder
self.assertTrue(os.path.exists(os.path.join(root, "conanfile.py")))
content = load(os.path.join(root, "conanfile.py"))
self.assertIn('name = "MyPackage"', content)
self.assertIn('version = "1.3"', content)
self.assertIn('topics = (', content)
self.assertNotIn('homepage', content)
self.assertTrue(os.path.exists(os.path.join(root, "test_package/conanfile.py")))
self.assertTrue(os.path.exists(os.path.join(root, "test_package/CMakeLists.txt")))
self.assertTrue(os.path.exists(os.path.join(root, "test_package/example.cpp")))
# assert they are correct at least
client.run("export . myuser/testing")
client.run("search")
self.assertIn("MyPackage/1.3@myuser/testing", client.out)
def test_new_sources(self):
client = TestClient()
client.run('new MyPackage/1.3@myuser/testing -t -s')
root = client.current_folder
self.assertTrue(os.path.exists(os.path.join(root, "conanfile.py")))
content = load(os.path.join(root, "conanfile.py"))
self.assertIn('name = "MyPackage"', content)
self.assertIn('version = "1.3"', content)
self.assertIn('exports_sources', content)
self.assertIn('topics = (', content)
self.assertNotIn('homepage', content)
self.assertNotIn('source()', content)
# assert they are correct at least
client.run("export . myuser/testing")
client.run("search")
self.assertIn("MyPackage/1.3@myuser/testing", client.out)
def test_new_purec(self):
client = TestClient()
client.run('new MyPackage/1.3@myuser/testing -c -t --source')
root = client.current_folder
self.assertTrue(os.path.exists(os.path.join(root, "conanfile.py")))
content = load(os.path.join(root, "conanfile.py"))
self.assertIn('name = "MyPackage"', content)
self.assertIn('version = "1.3"', content)
self.assertIn('topics = (', content)
self.assertNotIn('homepage', content)
# assert they are correct at least
client.run("export . myuser/testing")
client.run("search")
self.assertIn("MyPackage/1.3@myuser/testing", client.out)
def test_new_without(self):
client = TestClient()
client.run('new MyPackage/1.3@myuser/testing')
root = client.current_folder
self.assertTrue(os.path.exists(os.path.join(root, "conanfile.py")))
self.assertFalse(os.path.exists(os.path.join(root, "test_package/conanfile.py")))
self.assertFalse(os.path.exists(os.path.join(root, "test_package/CMakeLists.txt")))
self.assertFalse(os.path.exists(os.path.join(root, "test_package/example.cpp")))
def test_new_ci(self):
client = TestClient()
client.run('new MyPackage/1.3@myuser/testing -cis -ciw -cilg -cilc -cio -ciglg -ciglc '
'-ciccg -ciccc -cicco -ciu=myurl')
root = client.current_folder
build_py = load(os.path.join(root, "build.py"))
self.assertIn('builder.add_common_builds(shared_option_name="MyPackage:shared")',
build_py)
self.assertNotIn('visual_versions=', build_py)
self.assertNotIn('gcc_versions=', build_py)
self.assertNotIn('clang_versions=', build_py)
self.assertNotIn('apple_clang_versions=', build_py)
self.assertNotIn('gitlab_gcc_versions=', build_py)
self.assertNotIn('gitlab_clang_versions=', build_py)
self.assertNotIn('circleci_gcc_versions=', build_py)
self.assertNotIn('circleci_clang_versions=', build_py)
self.assertNotIn('circleci_osx_versions=', build_py)
appveyor = load(os.path.join(root, "appveyor.yml"))
self.assertIn("CONAN_UPLOAD: \"myurl\"", appveyor)
self.assertIn('CONAN_REFERENCE: "MyPackage/1.3"', appveyor)
self.assertIn('CONAN_USERNAME: "myuser"', appveyor)
self.assertIn('CONAN_CHANNEL: "testing"', appveyor)
self.assertIn(r'PYTHON: "C:\\Python37"', appveyor)
self.assertIn('CONAN_VISUAL_VERSIONS: 12', appveyor)
self.assertIn('CONAN_VISUAL_VERSIONS: 14', appveyor)
self.assertIn('CONAN_VISUAL_VERSIONS: 15', appveyor)
travis = load(os.path.join(root, ".travis.yml"))
self.assertIn("- CONAN_UPLOAD: \"myurl\"", travis)
self.assertIn('- CONAN_REFERENCE: "MyPackage/1.3"', travis)
self.assertIn('- CONAN_USERNAME: "myuser"', travis)
self.assertIn('- CONAN_CHANNEL: "testing"', travis)
self.assertIn('env: CONAN_GCC_VERSIONS=5 CONAN_DOCKER_IMAGE=conanio/gcc5',
travis)
gitlab = load(os.path.join(root, ".gitlab-ci.yml"))
self.assertIn("CONAN_UPLOAD: \"myurl\"", gitlab)
self.assertIn('CONAN_REFERENCE: "MyPackage/1.3"', gitlab)
self.assertIn('CONAN_USERNAME: "myuser"', gitlab)
self.assertIn('CONAN_CHANNEL: "testing"', gitlab)
self.assertIn('CONAN_GCC_VERSIONS: "5"', gitlab)
circleci = load(os.path.join(root, ".circleci", "config.yml"))
self.assertIn("CONAN_UPLOAD: \"myurl\"", circleci)
self.assertIn('CONAN_REFERENCE: "MyPackage/1.3"', circleci)
self.assertIn('CONAN_USERNAME: "myuser"', circleci)
self.assertIn('CONAN_CHANNEL: "testing"', circleci)
self.assertIn('CONAN_GCC_VERSIONS: "5"', circleci)
def test_new_ci_partial(self):
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -cis', assert_error=True)
client.run('new MyPackage/1.3@myuser/testing -cilg')
self.assertTrue(os.path.exists(os.path.join(root, "build.py")))
self.assertTrue(os.path.exists(os.path.join(root, ".travis.yml")))
self.assertTrue(os.path.exists(os.path.join(root, ".travis/install.sh")))
self.assertTrue(os.path.exists(os.path.join(root, ".travis/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, "appveyor.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".gitlab-ci.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".circleci/config.yml")))
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -ciw')
self.assertTrue(os.path.exists(os.path.join(root, "build.py")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/install.sh")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/run.sh")))
self.assertTrue(os.path.exists(os.path.join(root, "appveyor.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".gitlab-ci.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".circleci/config.yml")))
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -cio')
self.assertTrue(os.path.exists(os.path.join(root, "build.py")))
self.assertTrue(os.path.exists(os.path.join(root, ".travis.yml")))
self.assertTrue(os.path.exists(os.path.join(root, ".travis/install.sh")))
self.assertTrue(os.path.exists(os.path.join(root, ".travis/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, "appveyor.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".gitlab-ci.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".circleci/config.yml")))
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -gi')
self.assertTrue(os.path.exists(os.path.join(root, ".gitignore")))
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -ciglg')
self.assertTrue(os.path.exists(os.path.join(root, "build.py")))
self.assertTrue(os.path.exists(os.path.join(root, ".gitlab-ci.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/install.sh")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, "appveyor.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".circleci/config.yml")))
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -ciglc')
self.assertTrue(os.path.exists(os.path.join(root, "build.py")))
self.assertTrue(os.path.exists(os.path.join(root, ".gitlab-ci.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/install.sh")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, "appveyor.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".circleci/config.yml")))
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -ciccg')
self.assertTrue(os.path.exists(os.path.join(root, "build.py")))
self.assertTrue(os.path.exists(os.path.join(root, ".circleci/config.yml")))
self.assertTrue(os.path.exists(os.path.join(root, ".circleci/install.sh")))
self.assertTrue(os.path.exists(os.path.join(root, ".circleci/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, ".gitlab-ci.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/install.sh")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, "appveyor.yml")))
client = TestClient()
root = client.current_folder
client.run('new MyPackage/1.3@myuser/testing -ciccc')
self.assertTrue(os.path.exists(os.path.join(root, "build.py")))
self.assertTrue(os.path.exists(os.path.join(root, ".circleci/config.yml")))
self.assertTrue(os.path.exists(os.path.join(root, ".circleci/install.sh")))
self.assertTrue(os.path.exists(os.path.join(root, ".circleci/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, ".gitlab-ci.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis.yml")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/install.sh")))
self.assertFalse(os.path.exists(os.path.join(root, ".travis/run.sh")))
self.assertFalse(os.path.exists(os.path.join(root, "appveyor.yml")))
def test_new_test_package_custom_name(self):
# https://github.com/conan-io/conan/issues/8164
client = TestClient()
client.run("new mypackage/0.1 -t")
source = client.load("test_package/example.cpp")
self.assertIn('#include "hello.h"', source)
self.assertIn("hello();", source)
def test_new_cmake_lib(self):
client = TestClient()
client.run("new pkg/0.1 --template=cmake_lib")
conanfile = client.load("conanfile.py")
self.assertIn("CMakeToolchain", conanfile)
conanfile = client.load("test_package/conanfile.py")
self.assertIn("CMakeToolchain", conanfile)
cmake = client.load("test_package/CMakeLists.txt")
self.assertIn("find_package", cmake)
def test_new_reference(self):
client = TestClient()
# full reference
client.run("new MyPackage/1.3@myuser/testing --template=cmake_lib")
conanfile = client.load("conanfile.py")
self.assertIn('name = "MyPackage"', conanfile)
self.assertIn('version = "1.3"', conanfile)
# no username, no channel (with @)
client.run("new MyPackage/1.3@ --template=cmake_lib")
conanfile = client.load("conanfile.py")
self.assertIn('version = "1.3"', conanfile)
self.assertIn('name = "MyPackage"', conanfile)
# no username, no channel (without @)
client.run("new MyPackage/1.3 --template=cmake_lib")
conanfile = client.load("conanfile.py")
self.assertIn('name = "MyPackage"', conanfile)
self.assertIn('version = "1.3"', conanfile)
|
|
# $Header$
'''Faults.
'''
from pyremotevbox.ZSI import _copyright, _children, _child_elements, \
_get_idstr, _stringtypes, _seqtypes, _Node, SoapWriter, ZSIException
from pyremotevbox.ZSI.TCcompound import Struct
from pyremotevbox.ZSI.TC import QName, URI, String, XMLString, AnyElement, UNBOUNDED
from pyremotevbox.ZSI.wstools.Namespaces import SOAP, ZSI_SCHEMA_URI
from pyremotevbox.ZSI.wstools.c14n import Canonicalize
from pyremotevbox.ZSI.TC import ElementDeclaration
import traceback, cStringIO as StringIO
class Detail:
def __init__(self, any=None):
self.any = any
Detail.typecode = Struct(Detail, [AnyElement(aname='any',minOccurs=0, maxOccurs="unbounded",processContents="lax")], pname='detail', minOccurs=0)
class FaultType:
def __init__(self, faultcode=None, faultstring=None, faultactor=None, detail=None):
self.faultcode = faultcode
self.faultstring= faultstring
self.faultactor = faultactor
self.detail = detail
FaultType.typecode = \
Struct(FaultType,
[QName(pname='faultcode'),
String(pname='faultstring'),
URI(pname=(SOAP.ENV,'faultactor'), minOccurs=0),
Detail.typecode,
AnyElement(aname='any',minOccurs=0, maxOccurs=UNBOUNDED, processContents="lax"),
],
pname=(SOAP.ENV,'Fault'),
inline=True,
hasextras=0,
)
class ZSIHeaderDetail:
def __init__(self, detail):
self.any = detail
ZSIHeaderDetail.typecode =\
Struct(ZSIHeaderDetail,
[AnyElement(aname='any', minOccurs=0, maxOccurs=UNBOUNDED, processContents="lax")],
pname=(ZSI_SCHEMA_URI, 'detail'))
class ZSIFaultDetailTypeCode(ElementDeclaration, Struct):
'''<ZSI:FaultDetail>
<ZSI:string>%s</ZSI:string>
<ZSI:trace>%s</ZSI:trace>
</ZSI:FaultDetail>
'''
schema = ZSI_SCHEMA_URI
literal = 'FaultDetail'
def __init__(self, **kw):
Struct.__init__(self, ZSIFaultDetail, [String(pname=(ZSI_SCHEMA_URI, 'string')),
String(pname=(ZSI_SCHEMA_URI, 'trace'),minOccurs=0),],
pname=(ZSI_SCHEMA_URI, 'FaultDetail'), **kw
)
class ZSIFaultDetail:
def __init__(self, string=None, trace=None):
self.string = string
self.trace = trace
def __str__(self):
if self.trace:
return self.string + '\n[trace: ' + self.trace + ']'
return self.string
def __repr__(self):
return "<%s.ZSIFaultDetail %s>" % (__name__, _get_idstr(self))
ZSIFaultDetail.typecode = ZSIFaultDetailTypeCode()
class URIFaultDetailTypeCode(ElementDeclaration, Struct):
'''
<ZSI:URIFaultDetail>
<ZSI:URI>uri</ZSI:URI>
<ZSI:localname>localname</ZSI:localname>
</ZSI:URIFaultDetail>
'''
schema = ZSI_SCHEMA_URI
literal = 'URIFaultDetail'
def __init__(self, **kw):
Struct.__init__(self, URIFaultDetail,
[String(pname=(ZSI_SCHEMA_URI, 'URI')), String(pname=(ZSI_SCHEMA_URI, 'localname')),],
pname=(ZSI_SCHEMA_URI, 'URIFaultDetail'), **kw
)
class URIFaultDetail:
def __init__(self, uri=None, localname=None):
self.URI = uri
self.localname = localname
URIFaultDetail.typecode = URIFaultDetailTypeCode()
class ActorFaultDetailTypeCode(ElementDeclaration, Struct):
'''
<ZSI:ActorFaultDetail>
<ZSI:URI>%s</ZSI:URI>
</ZSI:ActorFaultDetail>
'''
schema = ZSI_SCHEMA_URI
literal = 'ActorFaultDetail'
def __init__(self, **kw):
Struct.__init__(self, ActorFaultDetail, [String(pname=(ZSI_SCHEMA_URI, 'URI')),],
pname=(ZSI_SCHEMA_URI, 'ActorFaultDetail'), **kw
)
class ActorFaultDetail:
def __init__(self, uri=None):
self.URI = uri
ActorFaultDetail.typecode = ActorFaultDetailTypeCode()
class Fault(ZSIException):
'''SOAP Faults.
'''
Client = "SOAP-ENV:Client"
Server = "SOAP-ENV:Server"
MU = "SOAP-ENV:MustUnderstand"
def __init__(self, code, string,
actor=None, detail=None, headerdetail=None):
if detail is not None and type(detail) not in _seqtypes:
detail = (detail,)
if headerdetail is not None and type(headerdetail) not in _seqtypes:
headerdetail = (headerdetail,)
self.code, self.string, self.actor, self.detail, self.headerdetail = \
code, string, actor, detail, headerdetail
ZSIException.__init__(self, code, string, actor, detail, headerdetail)
def DataForSOAPHeader(self):
if not self.headerdetail: return None
# SOAP spec doesn't say how to encode header fault data.
return ZSIHeaderDetail(self.headerdetail)
def serialize(self, sw):
'''Serialize the object.'''
detail = None
if self.detail is not None:
detail = Detail()
detail.any = self.detail
pyobj = FaultType(self.code, self.string, self.actor, detail)
sw.serialize(pyobj, typed=False)
def AsSOAP(self, **kw):
header = self.DataForSOAPHeader()
sw = SoapWriter(**kw)
self.serialize(sw)
if header is not None:
sw.serialize_header(header, header.typecode, typed=False)
return str(sw)
def __str__(self):
strng = str(self.string) + "\n"
if hasattr(self, 'detail'):
if hasattr(self.detail, '__len__'):
for d in self.detail:
strng += str(d)
else:
strng += str(self.detail)
return strng
def __repr__(self):
return "<%s.Fault at %s>" % (__name__, _get_idstr(self))
AsSoap = AsSOAP
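# Illustrative sketch, not part of the original module: building a client Fault by
# hand and rendering it with AsSOAP(). The fault string and detail text are made-up
# values for demonstration only.
def _example_client_fault_xml():
    detail = ZSIFaultDetail(string='bad value in request', trace=None)
    fault = Fault(Fault.Client, 'Invalid request', detail=detail)
    return fault.AsSOAP()  # the serialized <SOAP-ENV:Fault> envelope as a string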
def FaultFromNotUnderstood(uri, localname, actor=None):
detail, headerdetail = None, URIFaultDetail(uri, localname)
return Fault(Fault.MU, 'SOAP mustUnderstand not understood',
actor, detail, headerdetail)
def FaultFromActor(uri, actor=None):
detail, headerdetail = None, ActorFaultDetail(uri)
return Fault(Fault.Client, 'Cannot process specified actor',
actor, detail, headerdetail)
def FaultFromZSIException(ex, actor=None):
'''Return a Fault object created from a ZSI exception object.
'''
mystr = getattr(ex, 'str', None) or str(ex)
mytrace = getattr(ex, 'trace', '')
elt = '''<ZSI:ParseFaultDetail>
<ZSI:string>%s</ZSI:string>
<ZSI:trace>%s</ZSI:trace>
</ZSI:ParseFaultDetail>
''' % (mystr, mytrace)
if getattr(ex, 'inheader', 0):
detail, headerdetail = None, elt
else:
detail, headerdetail = elt, None
return Fault(Fault.Client, 'Unparseable message',
actor, detail, headerdetail)
def FaultFromException(ex, inheader, tb=None, actor=None):
'''Return a Fault object created from a Python exception.
<SOAP-ENV:Fault>
<faultcode>SOAP-ENV:Server</faultcode>
<faultstring>Processing Failure</faultstring>
<detail>
<ZSI:FaultDetail>
<ZSI:string></ZSI:string>
<ZSI:trace></ZSI:trace>
</ZSI:FaultDetail>
</detail>
</SOAP-ENV:Fault>
'''
tracetext = None
if tb:
try:
lines = '\n'.join(['%s:%d:%s' % (name, line, func)
for name, line, func, text in traceback.extract_tb(tb)])
except:
pass
else:
tracetext = lines
exceptionName = ""
try:
exceptionName = ":".join([ex.__module__, ex.__class__.__name__])
except: pass
elt = ZSIFaultDetail(string=exceptionName + "\n" + str(ex), trace=tracetext)
if inheader:
detail, headerdetail = None, elt
else:
detail, headerdetail = elt, None
return Fault(Fault.Server, 'Processing Failure',
actor, detail, headerdetail)
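# Illustrative sketch, not part of the original module: wrapping a caught Python
# exception with FaultFromException() and serializing it, which is how a server
# would typically report a processing failure to the client.
def _example_fault_from_exception():
    import sys
    try:
        raise ValueError('something went wrong')
    except ValueError, ex:
        fault = FaultFromException(ex, inheader=0, tb=sys.exc_info()[2])
        return fault.AsSOAP()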
def FaultFromFaultMessage(ps):
'''Parse the message as a fault.
'''
pyobj = ps.Parse(FaultType.typecode)
if pyobj.detail == None: detailany = None
else: detailany = pyobj.detail.any
return Fault(pyobj.faultcode, pyobj.faultstring,
pyobj.faultactor, detailany)
if __name__ == '__main__': print _copyright
|
|
#
# Copyright (c) 2016, SUSE LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ceph-auto-aws nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import copy
import logging
import time
from handson.keypair import Keypair
from handson.myyaml import stanza
from handson.region import Region
from handson.subnet import Subnet
from handson.tag import apply_tag
from handson.util import (
derive_ip_address,
get_file_as_string,
template_token_subst,
)
log = logging.getLogger(__name__)
class Delegate(Region):
def __init__(self, args, delegate):
super(Delegate, self).__init__(args)
self.args = args
k = Keypair(self.args, delegate)
k.keypair_obj(import_ok=True, dry_run=self.args.dry_run)
s = Subnet(self.args, delegate)
s_obj = s.subnet_obj(create=True, dry_run=self.args.dry_run)
ec2 = self.ec2()
self._delegate = {
'delegate': delegate,
'ec2': ec2,
'keyname': k.get_keyname_from_yaml(),
'roles': {},
'subnet_obj': s_obj,
}
def apply_tags(self, aws_obj, role=None):
delegate = self._delegate['delegate']
apply_tag(aws_obj, tag='Name', val=stanza('nametag'))
apply_tag(aws_obj, tag='Role', val=role)
apply_tag(aws_obj, tag='Delegate', val=delegate)
def preexisting_instances(self):
delegate = self._delegate['delegate']
ec2 = self._delegate['ec2']
s_obj = self._delegate['subnet_obj']
s_id = s_obj.id
instance_list = ec2.get_only_instances(
filters={"subnet-id": s_id}
)
count = len(instance_list)
if count > 0:
log.warning("Delegate {} (subnet {}) already has {} instances"
.format(delegate, s_obj.cidr_block, count))
return count
def set_subnet_map_public_ip(self):
"""
Attempts to set the MapPublicIpOnLaunch attribute to True.
Code taken from http://stackoverflow.com/questions/25977048
Author: Mark Doliner
"""
ec2 = self._delegate['ec2']
subnet_id = self._delegate['subnet_obj'].id
orig_api_version = ec2.APIVersion
ec2.APIVersion = '2014-06-15'
ec2.get_status(
'ModifySubnetAttribute',
{'SubnetId': subnet_id, 'MapPublicIpOnLaunch.Value': 'true'},
verb='POST'
)
ec2.APIVersion = orig_api_version
return None
def roles_to_install(self):
delegate = self._delegate['delegate']
rti = []
rdd = {}
if delegate == 0:
role_def = self.assemble_role_def('master')
rdd['master'] = role_def
rti.append('master')
if delegate > 0:
cluster_def = stanza('cluster-definition')
for cluster_def_entry in cluster_def:
role = cluster_def_entry['role']
role_def = self.assemble_role_def(role)
rdd[role] = role_def
rti.append(role)
return (rti, rdd)
def ready_to_install(self, dry_run=False):
if self.preexisting_instances():
return False
if dry_run:
return True
(rti, self._delegate['role_defs']) = self.roles_to_install()
return rti
def assemble_role_def(self, role):
rd = stanza('role-definitions')
rv = copy.deepcopy(rd['defaults'])
for a in rd[role]:
rv[a] = rd[role][a]
return rv
def instantiate_role(self, role):
delegate = self._delegate['delegate']
ec2 = self._delegate['ec2']
rd = self._delegate['role_defs'][role]
log.info("Instantiating role {} from role-def {!r}".format(role, rd))
private_ip = derive_ip_address(
self._delegate['subnet_obj'].cidr_block,
self._delegate['delegate'],
rd['last-octet'],
)
# kwargs we use always
our_kwargs = {
"key_name": self._delegate['keyname'],
"subnet_id": self._delegate['subnet_obj'].id,
"instance_type": rd['type'],
"private_ip_address": private_ip,
"placement": self.availability_zone()
}
# conditional kwargs
if rd['user-data']:
u = get_file_as_string(rd['user-data'])
log.info("Read {} characters of user-data from file {}"
.format(len(u), rd['user-data']))
# FIXME master IP address is hardcoded
# FIXME template_token_subst() calls are hardcoded
u = template_token_subst(u, '@@MASTER_IP@@', '10.0.0.10')
u = template_token_subst(u, '@@DELEGATE@@', delegate)
u = template_token_subst(u, '@@ROLE@@', role)
u = template_token_subst(u, '@@NODE_NO@@', rd['node-no'])
u = template_token_subst(u, '@@REGION@@', self.region())
our_kwargs['user_data'] = u
reservation = ec2.run_instances(rd['ami-id'], **our_kwargs)
i_obj = reservation.instances[0]
self.apply_tags(i_obj, role=role)
v_obj = None
if rd['volume']:
vol_size = int(rd['volume'])
log.info("Role {} requires {}GB volume".format(role, vol_size))
if vol_size > 0:
v_obj = ec2.create_volume(vol_size, i_obj.placement)
self.apply_tags(v_obj, role=role)
return (i_obj, v_obj)
def instance_await_state(self, role, instance_id, state='running'):
return self.await_state(
role,
instance_id,
state=state,
thing='instance'
)
def volume_await_state(self, role, volume_id, state='running'):
return self.await_state(
role,
volume_id,
state=state,
thing='volume'
)
def await_state(self, role, t_id, thing=None, state=None):
log.info("Waiting for {} {} to reach '{}' state"
.format(role, thing, state))
ec2 = self._delegate['ec2']
while True:
if thing == 'instance':
things = ec2.get_only_instances(instance_ids=[t_id])
aws_state = things[0].state
elif thing == 'volume':
things = ec2.get_all_volumes(volume_ids=[t_id])
aws_state = things[0].status
else:
assert 1 == 0, "Programmer brain failure"
log.info("Current state is {}".format(aws_state))
if aws_state != state:
log.info("Sleeping for 5 seconds")
time.sleep(5)
else:
# log.info("Sleeping another 5 seconds for good measure"
# time.sleep(5)
break
def install(self, dry_run=False):
self._delegate['roles'] = self.ready_to_install(dry_run=dry_run)
if not self._delegate['roles']:
return None
        if dry_run:
            log.info("Dry run: doing nothing")
            return None
delegate = self._delegate['delegate']
c_stanza = stanza('clusters')
c_stanza[delegate] = {}
stanza('clusters', c_stanza)
self.set_subnet_map_public_ip()
# instantiate node for each role
aws_objs = {}
for role in self._delegate['roles']:
c_stanza[delegate][role] = {}
stanza('clusters', c_stanza)
(i_obj, v_obj) = self.instantiate_role(role)
aws_objs[role] = {}
aws_objs[role]['instance_obj'] = i_obj
aws_objs[role]['volume_obj'] = v_obj
c_stanza[delegate][role]['instance_id'] = i_obj.id
c_stanza[delegate][role]['placement'] = i_obj.placement
if v_obj:
c_stanza[delegate][role]['volume_id'] = v_obj.id
stanza('clusters', c_stanza)
log.info("Instantiated {} node (instance ID {})"
.format(role, i_obj.id))
# attach volumes
ec2 = self._delegate['ec2']
for role in self._delegate['roles']:
i_obj = aws_objs[role]['instance_obj']
v_obj = aws_objs[role]['volume_obj']
if v_obj:
c_stanza[delegate][role]['volume_id'] = v_obj.id
self.instance_await_state(role, i_obj.id, state='running')
self.volume_await_state(role, v_obj.id, state='available')
assert ec2.attach_volume(v_obj.id, i_obj.id, '/dev/sdb'), (
"Failed to attach volume to role {}, delegate {}"
.format(role, delegate))
return None
def is_attached(self, v_id, i_id):
ec2 = self._delegate['ec2']
attached_vol = ec2.get_all_volumes(
filters={
"volume-id": v_id,
"attachment.instance-id": i_id,
"attachment.device": "/dev/sdb"
}
)
log.debug("attached_vol == {}".format(attached_vol))
if attached_vol is None or len(attached_vol) == 0:
return False
return True
def wait_for_detachment(self, v_id, i_id):
log.info("Waiting for volume {} to be detached from instance {}"
.format(v_id, i_id))
while True:
if self.is_attached(v_id, i_id):
time.sleep(5)
log.info("Still attached")
continue
log.info("Volume has been detached")
break
def walk_clusters(self, operation=None, dry_run=False):
ec2 = self._delegate['ec2']
delegate = self._delegate['delegate']
c_stanza = stanza('clusters')
if delegate not in c_stanza:
log.warning("Delegate {} has no instances"
.format(delegate))
return None
if operation == "start":
what_done = "started"
do_what = ec2.start_instances
elif operation == "stop":
what_done = "stopped"
do_what = ec2.stop_instances
elif operation == "wipeout":
what_done = "terminated"
do_what = ec2.terminate_instances
else:
assert 1 == 0
instance_id_list = []
iv_map = {} # keys are instance IDs and values are volume IDs
for role in c_stanza[delegate]:
if dry_run:
log.info("Dry run: doing nothing for role {!r}"
.format(role))
continue
i_id = c_stanza[delegate][role]['instance_id']
instance_id_list.append(i_id)
if 'volume_id' in c_stanza[delegate][role]:
iv_map[i_id] = {
'volume_id': c_stanza[delegate][role]['volume_id'],
'role': role
}
if operation == "wipeout" and iv_map:
ec2.stop_instances(instance_ids=iv_map.keys())
# for i_id in iv_map.keys():
# self.instance_await_state(
# iv_map[i_id]['role'],
# i_id,
# state='stopped',
# )
log.info("Detaching {} volumes...".format(len(iv_map)))
for i_id in iv_map.keys():
v_id = iv_map[i_id]['volume_id']
v_list = ec2.get_all_volume_status(volume_ids=[v_id])
log.debug("Volume {} status {}"
.format(v_id, v_list[0].__dict__))
if self.is_attached(v_id, i_id):
ec2.detach_volume(
v_id,
instance_id=i_id,
device='/dev/sdb',
force=True
)
log.info("Deleting {} volumes...".format(len(iv_map)))
for i_id in iv_map.keys():
v_id = iv_map[i_id]['volume_id']
self.wait_for_detachment(v_id, i_id)
ec2.delete_volume(v_id)
if instance_id_list:
do_what(instance_ids=instance_id_list)
if operation == "wipeout" and not dry_run:
del(c_stanza[delegate])
stanza('clusters', c_stanza)
log.info("{} instances {} for delegate {}"
.format(len(instance_id_list), what_done, delegate))
def wipeout(self, dry_run=False):
self.walk_clusters(operation='wipeout', dry_run=dry_run)
def stop(self, dry_run=False):
self.walk_clusters(operation='stop', dry_run=dry_run)
def start(self, dry_run=False):
self.walk_clusters(operation='start', dry_run=dry_run)
def fetch_public_ip(self, role):
ec2 = self._delegate['ec2']
subnet_id = self._delegate['subnet_obj'].id
instances = ec2.get_only_instances(
filters={
"subnet-id": subnet_id,
"tag-key": "Role",
"tag-value": role
}
)
found = False
public_ip = ''
for i in instances:
public_ip = "{}".format(i.ip_address)
found = True
if not found:
public_ip = "(none)"
return public_ip
def probe(self):
delegate = self._delegate['delegate']
c_stanza = stanza('clusters')
if delegate not in c_stanza:
log.info("Delegate {} not instantiated".format(delegate))
return None
d_stanza = c_stanza[delegate]
retval = False
for role in d_stanza.keys():
retval = True
log.info("Delegate {}, role {}, public IP {}"
.format(delegate, role, self.fetch_public_ip(role)))
return retval
def public_ips(self):
delegate = self._delegate['delegate']
c_stanza = stanza('clusters')
public_ips = {}
if delegate not in c_stanza:
log.info("Delegate {} not instantiated".format(delegate))
return None
d_stanza = c_stanza[delegate]
for role in d_stanza.keys():
public_ips[role] = self.fetch_public_ip(role)
return public_ips
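# Illustrative sketch, not part of the original module: a minimal driver loop over
# the Delegate lifecycle. The ``args`` object is assumed to be the same argparse
# namespace the rest of handson passes around (it must at least carry ``dry_run``).
def _example_delegate_lifecycle(args, delegates=(1, 2)):
    for d in delegates:
        delegate = Delegate(args, d)
        delegate.install(dry_run=args.dry_run)  # one cluster per delegate
        delegate.probe()                        # log role -> public IP mappings
    for d in delegates:                         # later: tear everything down again
        Delegate(args, d).wipeout(dry_run=args.dry_run)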
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
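# Illustrative sketch, not part of the original script: a concrete check of the
# byte-order helpers used to massage getwork data. 0x11223344 byte-reversed is
# 0x44332211; bufreverse() applies that swap to every 32-bit word of a buffer,
# while wordreverse() reverses the order of the 4-byte words themselves.
def _example_byte_order_helpers():
	assert bytereverse(0x11223344) == 0x44332211
	buf = 'abcdefgh'                       # two 4-byte words: 'abcd', 'efgh'
	assert bufreverse(buf) == 'dcbahgfe'   # bytes swapped within each word
	assert wordreverse(buf) == 'efghabcd'  # word order flipped, bytes kept
	return True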
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 61142
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
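	# Illustrative note, not part of the original script: CONFIG-FILE is a plain
	# key=value file ('#' lines are comments). The keys handled above look like
	# this, for example (the credential values here are made up):
	#
	#   host=127.0.0.1
	#   port=61142
	#   rpcuser=bitcoinrpc
	#   rpcpass=change_me
	#   threads=4
	#   scantime=30
	#   hashmeter=1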
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
#!/usr/bin/env python
from __future__ import division
from __future__ import with_statement
import numpy as np
#from pylab import ion
import matplotlib as mpl
#from matplotlib.path import Path
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
#import numexpr as ne
#from numba import autojit
import sys
import time
import cPickle as pickle
import collections
from multiprocessing import Process, Queue
from smartFormat import smartFormat
from genericUtils import wstdout, wstderr
__author__ = "J.L. Lanfranchi"
__email__ = "jll1062@phys.psu.edu"
__copyright__ = "Copyright 2014 J.L. Lanfranchi"
__credits__ = ["J.L. Lanfranchi"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
#-- Turn live-interactive plotting on (makes updated plots appear animated)
#ion()
#-- Adjust the font used on the plots
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
mpl.rc('font', **font)
def step(previousDirection, currentCoord, relMoveDir):
newDirection = (previousDirection + relMoveDir) % 4
    if newDirection == 0:
        return (currentCoord[0]+1, currentCoord[1])
    elif newDirection == 1:
        return (currentCoord[0], currentCoord[1]+1)
    elif newDirection == 2:
return (currentCoord[0]-1, currentCoord[1])
else:
return (currentCoord[0], currentCoord[1]-1)
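# Illustrative sketch, not part of the original script: how step() combines the
# previous heading with a relative turn. Heading 1 means "up"; a relative move of
# 0 keeps the heading, +1 turns left and -1 turns right on the square lattice.
def _example_step():
    assert step(1, (0, 1), 0) == (0, 2)    # keep heading up
    assert step(1, (0, 1), +1) == (-1, 1)  # turn left, move in -x
    assert step(1, (0, 1), -1) == (1, 1)   # turn right, move in +x
    return True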
#@autojit
def measureChain(chain):
"""Measures the Euclidean distance from the startpoint to endpoint of
a chain"""
return np.sqrt((chain[-1][0] - chain[0][0])**2
+ (chain[-1][1] - chain[0][1])**2)
#@autojit
def simpleAttemptToCreateChain(nSteps, changesInDir=(0,-1,+1)):
"""State is taken to be direction of travel; there are 4 directions, so
four states: right (0), up (1), left (2), and down (3). The only allowed
transitions are
state -> state + 1 (modulo 4)
state -> state - 1 (modulo 4)
state -> state
Then, it must be checked that the coordinate isn't already in the chain;
if it is, then None is returned; otherwise, the algo repeats until a
chain of nSteps is reached.
"""
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
#chainCoords = collections.deque([(0,0)])
#chainCoords = [(0,0)]
chainCoords = {(0,0):True}
coord = (0,1)
#chainCoords.append(coord)
chainCoords.update({coord:True})
previousDirection = 1
length = 1
nChangesInDir = len(changesInDir)
while True:
relMoveDir = changesInDir[np.random.randint(low=0,
high=nChangesInDir)]
coord = step(previousDirection, coord, relMoveDir)
if coord in chainCoords:
return None
#chainCoords.append(coord)
chainCoords.update({coord:True})
length += 1
if length == nSteps:
return chainCoords
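# Illustrative sketch, not part of the original script: the rejection-sampling
# pattern used throughout this file. Keep calling simpleAttemptToCreateChain()
# until an attempt survives without self-intersecting, and count the attempts.
def _example_rejection_sampling(nSteps=20):
    attempts = 0
    chain = None
    while chain is None:
        attempts += 1
        chain = simpleAttemptToCreateChain(nSteps)
    return attempts, len(chain)  # len() counts lattice sites, i.e. nSteps+1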
class CreateChainWorkerClass(Process):
"""Direction is direction of travel; there are 4 directions, so
right=0, up=1, left=2, and down=3. By default the only allowed transitions
are
direction -> direction + 1 (modulo 4)
direction -> direction - 1 (modulo 4)
direction -> direction
but the changesInDir allows the user to specify what changes are allowed.
Note that each is chosen with equal probability.
Next, it is checked that the coordinate isn't already in the chain;
if it is in the chain, then None is returned; otherwise, the algo repeats
until a chain of nSteps is reached.
"""
# TODO: shared "tries" object contains single int, gets incremented
# for each try
def __init__(self, chainsQueue, nSteps, changesInDir=(0,-1,+1), nChains=1):
Process.__init__(self)
wstdout("0")
self.chainsQueue = chainsQueue
self.nSteps = nSteps
self.changesInDir = changesInDir
self.nChains = nChains
self.nChangesInDir = len(changesInDir)
wstdout("1\n")
def step(self, previousDirection, currentCoord, relMoveDir):
newDirection = (previousDirection + relMoveDir) % 4
        if newDirection == 0:
            return (currentCoord[0]+1, currentCoord[1])
        elif newDirection == 1:
            return (currentCoord[0], currentCoord[1]+1)
        elif newDirection == 2:
return (currentCoord[0]-1, currentCoord[1])
else:
return (currentCoord[0], currentCoord[1]-1)
def run(self):
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
while True:
            chainCoords = collections.deque([(0,0)], maxlen=self.nSteps+1)
coord = (0,1)
chainCoords.append(coord)
previousDirection = 1
thisChainLen = 1
while thisChainLen < self.nSteps:
if self.chainsQueue.qsize() >= self.nChains:
return
relMoveDir = self.changesInDir[
np.random.randint(0,self.nChangesInDir)]
coord = self.step(previousDirection, coord, relMoveDir)
if coord in chainCoords:
break
chainCoords.append(coord)
thisChainLen += 1
if thisChainLen == self.nSteps:
self.chainsQueue.put(chainCoords)
break
def createChainWorker(chainsQueue, nSteps, changesInDir=(0,-1,+1), nChains=1):
"""Direction is direction of travel; there are 4 directions, so
right=0, up=1, left=2, and down=3. By default the only allowed transitions
are
direction -> direction + 1 (modulo 4)
direction -> direction - 1 (modulo 4)
direction -> direction
but the changesInDir allows the user to specify what changes are allowed.
Note that each is chosen with equal probability.
Next, it is checked that the coordinate isn't already in the chain;
if it is in the chain, then None is returned; otherwise, the algo repeats
until a chain of nSteps is reached.
"""
# TODO: shared "tries" object contains single int, gets incremented
# for each try
#wstdout("0")
chainsQueue = chainsQueue
nSteps = nSteps
nChains = nChains
nChangesInDir = len(changesInDir)
#wstdout("1")
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
#lattice = np.zeros((nSteps*2,nSteps*2), dtype=np.bool8)
#lattice[nSteps,nSteps] = True
#lattice[nSteps,nSteps+1] = True
while True:
#chainCoords = collections.deque([(nSteps,nSteps),(nSteps,nSteps+1)],
# maxlen=nSteps+1)
#chainCoords = [(nSteps,nSteps), (nSteps,nSteps+1)]
chainCoords = [(0,0), (0,1)]
#chainCoords = {(0,0):True}
coord = (0,1)
#chainCoords.append(coord)
#lattice[coord] = True
#chainCoords.update({coord:True})
previousDirection = 1
thisChainLen = 1
#iteration = 0
while thisChainLen < nSteps:
#iteration += 1
#wstdout("2")
#if iteration % 10 == 0:
# if chainsQueue.qsize() >= nChains:
# return
relMoveDir = changesInDir[
np.random.randint(0,nChangesInDir)]
coord = step(previousDirection, coord, relMoveDir)
#if lattice[coord]:
if coord in chainCoords:
#for c in chainCoords[2:]:
# lattice[c] = False
#wstdout("x")
break
else:
chainCoords.append(coord)
#lattice[coord] = True
#chainCoords.update({coord:True})
thisChainLen += 1
if thisChainLen == nSteps:
chainsQueue.put(chainCoords)
break
def reptateChainWorker(chainsToReptate, lenSq, nChainsToCompute):
chain = chainsToReptate.get()
    childChains = chain.reptate()
    for (childChain, operation) in childChains:
        if operation == 'move':
            lenSq.put(measureChain(childChain))
            if lenSq.qsize() < nChainsToCompute:
chainsToReptate.put(childChain)
def simpleCreateChain(nSteps=5, changesInDir=(-1,0,1)):
while True:
chain = simpleAttemptToCreateChain(nSteps, changesInDir=changesInDir)
if chain != None:
break
return collections.deque(chain, maxlen=nSteps+1)
def createChainParallel(nSteps=60, nChains=1, changesInDir=(-1,0,1), nProcs=4):
chainsQueue = Queue()
args = (chainsQueue, nSteps, changesInDir, nChains)
##pool = Pool(processes=nProcs)
#kwargs = {'nSteps': nSteps,
# 'changesInDir': changesInDir,
# 'nChains': nChains,
# 'chainsQueue': chainsQueue
# }
#for procN in range(nProcs):
# #pool.apply_async(CreateChainWorkerClass, kwds=kwargs)
# CreateChainWorkerClass, kwds=kwargs)
#while chainsQueue.qsize() < nChains:
# time.sleep(0.2)
#chains = []
#while not chainsQueue.empty():
# chains.append(chainsQueue.get())
procs = []
for n in range(nProcs):
procs.append(Process(target=createChainWorker,args=args))
[proc.start() for proc in procs]
#while chainsQueue.qsize() < nChains:
# time.sleep(0.1)
chains = []
#while not chainsQueue.empty():
#while len(chains) < nChains:
#time.sleep(0.5)
chains.append(chainsQueue.get())
[proc.terminate() for proc in procs]
return chains
#class chain:
# def __init__(self, nSteps, initialChain=None):
# self.moveForward = True
# if initialChain == None:
# self.nSteps = nSteps
# self.
#
# self.coordinates = collections.deque(coords, maxlen=nSteps)
class reptationChain90:
"""
90-degree-only reptation chain of length nSteps
"""
def __init__(self, nSteps, initialChain):
self.nSteps = nSteps
def reptate(self):
pass
def createChainReptation(nSteps):
"""State is taken to be direction of travel; there are 4 directions, so
four states: right (0), up (1), left (2), and down (3). The only allowed
transitions are
state -> state + 1 (modulo 4)
state -> state - 1 (modulo 4)
state -> state
Then, it must be checked that the coordinate isn't already in the chain;
if it is, then None is returned; otherwise, the algo repeats until a
chain of nSteps is reached.
"""
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
    chainCoords = [(0,0)]
coord = (0,1)
chainCoords.append(coord)
state = 1
length = 1
#np.random.seed(int(time.time()*1000)%120)
#np.random.seed(2)
while True:
randVal = np.random.randint(low=-1, high=2)
state = (state + randVal) % 4
        if state == 0:
            coord = (coord[0]+1, coord[1])
        elif state == 1:
            coord = (coord[0], coord[1]+1)
        elif state == 2:
            coord = (coord[0]-1, coord[1])
        elif state == 3:
coord = (coord[0], coord[1]-1)
if coord in chainCoords:
return None
chainCoords.append(coord)
length += 1
if length == nSteps:
return chainCoords
formatDic = {'sigFigs': 4, 'demarc': "", 'threeSpacing': False, 'rightSep':""}
def powerLaw(x, power, multFact, offset):
return multFact*(x**power) + offset
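# Illustrative sketch, not part of the original script: fitting powerLaw() to
# synthetic data with curve_fit, the same pattern postproc() uses further below.
# The coefficients (1.5, 0.8, 2.0) are arbitrary demonstration values.
def _example_power_law_fit():
    x = np.arange(4, 50, dtype=float)
    y = 0.8 * x**1.5 + 2.0 + np.random.normal(scale=0.01, size=x.size)
    popt, pcov = curve_fit(f=powerLaw, xdata=x, ydata=y, p0=(1.5, 1.0, 0.0))
    return popt  # roughly (power=1.5, multFact=0.8, offset=2.0)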
def powerLawLatex(power, multFact=1, offset=0, pcov=None):
offsetStr = smartFormat(offset, alwaysShowSign=True, **formatDic)
if not (offsetStr[0] == "+" or offsetStr[0] == "-"):
offsetStr = "+" + offsetStr
latex = r"$" + smartFormat(multFact, **formatDic) + \
r" \cdot N^{" + smartFormat(power, **formatDic) + r"} " + \
offsetStr + \
r"$"
return latex
def exponential(x, expExponent, multFact=1):
return multFact * np.exp(np.array(x)*expExponent)
def exponentialLatex(expExponent, multFact=1, pcov=None):
latex = r"$" + smartFormat(multFact, **formatDic) + \
r"\cdot e^{" + smartFormat(expExponent, **formatDic) + \
r"\cdot N}$"
return latex
def expPower(x, expExponent, powerLawExp, multFact):
x = np.array(x)
return multFact * np.exp(x*expExponent) * x**powerLawExp
def expPowerLatex(expExponent, powerLawExp, multFact, pcov=None):
latex = r"$" + smartFormat(multFact, **formatDic) + \
r"\cdot e^{" + smartFormat(expExponent, **formatDic) + \
r"\cdot N}\cdot N^{" + smartFormat(powerLawExp, **formatDic) + \
r"}$"
return latex
class SimulationData:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Simulation:
def __init__(self):
self.sd = SimulationData()
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
self.stateFilename = "p7x28_state.pk"
def saveState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'wb') as stateFile:
pickle.dump(self.sd, stateFile, -1)
def loadState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'rb') as stateFile:
self.sd = pickle.load(stateFile)
def runSimulation(self, targetSuccesses=10, stepsRange=(4,50),
plotting=False):
#-- Reset state variables for a new simulation run
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
timeLastSaved = time.time()
self.sd.targetSuccesses = targetSuccesses
self.sd.stepsInChains = range(stepsRange[0],stepsRange[1])
self.sd.allChainFinalCoords = []
self.sd.allMeanChainFinalCoords = []
self.sd.meanChainFinalCoords = []
self.sd.chainSquareLengthAvg = []
self.sd.successRatio = []
self.sd.timingAvg = []
if plotting:
self.fig1 = plt.figure(1)
self.fig1.clf()
            self.ax1 = self.fig1.add_subplot(111)
            line, = self.ax1.plot([], [], 'ko-', lw=2)
            self.ax1.set_xlim(-20,20)
            self.ax1.set_ylim(-20,20)
            self.ax1.axis('image')
plt.draw()
for stepsThisChain in self.sd.stepsInChains:
startTime = time.time()
successfulChains = []
chainSquareLengths = []
chainFinalCoords = []
meanChainFinalCoord = []
nSuccesses = 0
trialN = 0
while nSuccesses < self.sd.targetSuccesses:
trialN += 1
chain = simpleAttemptToCreateChain(stepsThisChain,(-1,0,1))
if chain == None:
continue
successfulChains.append(chain)
chain = np.array(chain)
chainSquareLengths.append(measureChain(chain)**2)
chainFinalCoords.append(chain[-1,:])
nSuccesses += 1
if plotting:
line.set_data(chain[:,0],chain[:,1])
self.ax1.set_xlim(-20,20)
self.ax1.set_ylim(-20,20)
plt.draw()
time.sleep(0.005)
chainFinalCoords = np.array(chainFinalCoords)
self.sd.allChainFinalCoords.append(chainFinalCoords)
self.sd.allMeanChainFinalCoords.append(meanChainFinalCoord)
self.sd.meanChainFinalCoord = np.mean(chainFinalCoords, 0)
self.sd.chainSquareLengthAvg.append(np.mean(chainSquareLengths))
self.sd.successRatio.append(nSuccesses / trialN)
self.sd.timingAvg.append( (time.time()-startTime)/nSuccesses )
sys.stdout.write("\nstepsThisChain = " + str(stepsThisChain) + "\n")
sys.stdout.write(" nSuccesses/nTrials = " + str(nSuccesses) + "/"
+ str(trialN) + " = "
+ str(self.sd.successRatio[-1]) + "\n")
sys.stdout.write(" time/success = " +
str(self.sd.timingAvg[-1]) + "\n")
sys.stdout.flush()
if (time.time() - timeLastSaved) > 60*5:
self.saveState()
timeLastSaved = time.time()
self.sd.allMeanChainFinalCoords = \
np.array(self.sd.allMeanChainFinalCoords)
#-- TODO: mean of final-position vector (r_N vector)
#np.sqrt(allMeanChainFinalCoords[:,0]**2+
# allMeanChainFinalCoords[:,1]**2)
self.sd.simulationCompleted = True
self.saveState()
def postproc(self):
"""Perform curve fitting to the data"""
#-- Update state
self.sd.postprocCompleted = False
#-- Check that simulation data is present
if not self.sd.simulationCompleted:
raise Exception("No simulation run; cannot perform curve fit!")
#-- Same x data is used for *all* the below curve fits
x = self.sd.stepsInChains
#============================================================
# Fit success fraction with const * exponential * power law
#============================================================
y = self.sd.successRatio
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
p0 = (-0.117, 0.1, 2)
popt1, pcov1 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0)
self.sd.fit1 = expPower(x, *popt1)
self.sd.fit1eqn = expPowerLatex(*popt1)
print popt1, pcov1, "\n"
#============================================================
# TODO: Fit the final position data
#============================================================
#y = (self.sd.chainLengthAvg)
#sigma = list(np.array(y))
#popt2, pcov2 = curve_fit(powerLaw, x, y, sigma=sigma)
#self.sd.fit2 = powerLaw(x, *popt2)
#self.sd.fit2eqn = powerLawLatex(*popt2)
#print popt2, pcov2, "\n"
#============================================================
# Fit R_N^2 with const * power-law + const
#============================================================
y = self.sd.chainSquareLengthAvg
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt3, pcov3 = curve_fit(f=powerLaw, xdata=x, ydata=y, sigma=sigma)
self.sd.fit3 = powerLaw(x, *popt3)
self.sd.fit3eqn = powerLawLatex(*popt3)
print popt3, pcov3, "\n"
#============================================================
# Exponential fit to wall-clock time (not as good a fit as
# exp*power, so this is commented out)
#============================================================
#y = (self.sd.timingAvg)
##p0 = (0.0985, 0.1, 1.65e-5)
#p0 = (0.0985, 1)
#sigma = list(np.array(y))
#popt4, pcov4 = curve_fit(f=exponential, xdata=x, ydata=y, sigma=sigma,
# p0=p0, )
#self.sd.fit4 = exponential(x, *popt4)
#self.sd.fit4eqn = exponentialLatex(*popt4)
#print popt4, pcov4, "\n"
#============================================================
# Exponential * power-law fit to wall-clock time
#============================================================
y = self.sd.timingAvg
#-- Initial guess
p0 = (0.129, 0, 2.981e-3)
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt4, pcov4 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0, )
self.sd.fit4 = expPower(x, *popt4)
self.sd.fit4eqn = expPowerLatex(*popt4)
print popt4, pcov4, "\n"
#-- Update state
self.sd.postprocCompleted = True
def plotResults(self, savePlot=True):
"""Plot the data and the fit curves"""
if not self.sd.simulationCompleted:
raise Exception("No simulation has been run; cannot plot results!")
if not self.sd.postprocCompleted:
self.postproc()
self.fig2 = plt.figure(2, figsize=(7,12), dpi=80)
self.fig2.clf()
self.ax21 = self.fig2.add_subplot(311)
self.ax21.plot(self.sd.stepsInChains, self.sd.successRatio,
'bo', label="data", markersize=4)
self.ax21.plot(self.sd.stepsInChains, self.sd.fit1,
'r-', label=self.sd.fit1eqn, linewidth=2, alpha=0.75)
self.ax21.set_title(
"Non-intersecting 2D random-walk chains;" +
" stop condition: " + str(self.sd.targetSuccesses) +
" successfully-built chains")
self.ax21.set_ylabel(r"Success fraction $f(N)$")
self.ax21.set_yscale('log')
self.ax21.grid(which='major', b=True)
self.ax21.legend(loc="best", fancybox=True, shadow=True)
#-- TODO: average of final position plot
#self.ax22 = fig2.add_subplot(412)
#self.ax22.plot(self.sd.stepsInChains, self.sd.chainLengthAvg,
# 'bo', label="data", markersize=4)
#self.ax22.plot(self.sd.stepsInChains, self.sd.fit2,
# 'r-', label=self.sd.fit2eqn, linewidth=2, alpha=0.75)
#self.ax22.set_ylabel(r"$\langle R_N \rangle$")
##self.ax22.set_yscale('log')
#ax22.grid(which='major', b=True)
#ax22.legend(loc="best", fancybox=True, shadow=True)
self.ax23 = self.fig2.add_subplot(312)
self.ax23.plot(self.sd.stepsInChains, self.sd.chainSquareLengthAvg,
'bo', label="data", markersize=4)
self.ax23.plot(self.sd.stepsInChains, self.sd.fit3,
'r-', label=self.sd.fit3eqn, linewidth=2, alpha=0.75)
self.ax23.set_ylabel(r"$\langle R_N^2\rangle$")
self.ax23.grid(which='major', b=True)
self.ax23.legend(loc="upper left", fancybox=True, shadow=True)
self.ax24 = self.fig2.add_subplot(313)
self.ax24.plot(self.sd.stepsInChains, self.sd.timingAvg,
'bo', label="data", markersize=4)
self.ax24.plot(self.sd.stepsInChains, self.sd.fit4,
'r-', label=self.sd.fit4eqn, linewidth=2, alpha=0.75)
self.ax24.set_xlabel(r"Nmber of steps in walk, $N$")
self.ax24.set_ylabel("Wall-clock time per successful chain (s)")
self.ax24.set_yscale('log')
self.ax24.grid(which='major', b=True)
self.ax24.legend(loc="upper left", fancybox=True, shadow=True)
self.fig2.tight_layout()
if savePlot:
self.fig2.savefig("2014-01-14_problem7x28_plots.pdf")
self.fig2.savefig("2014-01-14_problem7x28_plots.png", dpi=120)
plt.show()
if __name__ == "__main__":
##-- Instantiate the Simulation object
#sim = Simulation()
##-- Try to load the sim data from any previous run; if no data saved
## to disk in the default location, run a new simulation
#try:
# sim.loadState()
#except Exception as e:
#    print "Error({0}): {1}".format(e.errno, e.strerror)
# #sim.runSimulation(targetSuccesses=10, stepsRange=(4,101))
# sim.runSimulation(targetSuccesses=10, stepsRange=(10,70))
##-- *Always* perform post-processing and plotting (allows easy modification
## of the postprocessing (curve fitting) and plotting routines
## without needing to re-run the simulation, which can take hours)
#sim.postproc()
#sim.plotResults()
##print simpleCreateChain(nSteps=20)
chains = createChainParallel(nSteps=60, nProcs=6, nChains=1)
    for chain in chains:
        wstdout(str(len(chain)) + " ")
wstdout("\n")
|
|
# -*- coding: utf-8 -*-
"""
sphinx.domains.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~
The JavaScript domain.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx import addnodes
from sphinx.domains import Domain, ObjType
from sphinx.locale import l_, _
from sphinx.directives import ObjectDescription
from sphinx.roles import XRefRole
from sphinx.domains.python import _pseudo_parse_arglist
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import Field, GroupedField, TypedField
class JSObject(ObjectDescription):
"""
Description of a JavaScript object.
"""
#: If set to ``True`` this object is callable and a `desc_parameterlist` is
#: added
has_arguments = False
#: what is displayed right before the documentation entry
display_prefix = None
def handle_signature(self, sig, signode):
sig = sig.strip()
if '(' in sig and sig[-1:] == ')':
prefix, arglist = sig.split('(', 1)
prefix = prefix.strip()
arglist = arglist[:-1].strip()
else:
prefix = sig
arglist = None
if '.' in prefix:
nameprefix, name = prefix.rsplit('.', 1)
else:
nameprefix = None
name = prefix
objectname = self.env.temp_data.get('js:object')
if nameprefix:
if objectname:
# someone documenting the method of an attribute of the current
# object? shouldn't happen but who knows...
nameprefix = objectname + '.' + nameprefix
fullname = nameprefix + '.' + name
elif objectname:
fullname = objectname + '.' + name
else:
# just a function or constructor
objectname = ''
fullname = name
signode['object'] = objectname
signode['fullname'] = fullname
if self.display_prefix:
signode += addnodes.desc_annotation(self.display_prefix,
self.display_prefix)
if nameprefix:
signode += addnodes.desc_addname(nameprefix + '.', nameprefix + '.')
signode += addnodes.desc_name(name, name)
if self.has_arguments:
if not arglist:
signode += addnodes.desc_parameterlist()
else:
_pseudo_parse_arglist(signode, arglist)
return fullname, nameprefix
def add_target_and_index(self, name_obj, sig, signode):
objectname = self.options.get(
'object', self.env.temp_data.get('js:object'))
fullname = name_obj[0]
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname.replace('$', '_S_'))
signode['first'] = not self.names
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['js']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]),
line=self.lineno)
objects[fullname] = self.env.docname, self.objtype
indextext = self.get_index_text(objectname, name_obj)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname.replace('$', '_S_'),
''))
def get_index_text(self, objectname, name_obj):
name, obj = name_obj
if self.objtype == 'function':
if not obj:
return _('%s() (built-in function)') % name
return _('%s() (%s method)') % (name, obj)
elif self.objtype == 'class':
return _('%s() (class)') % name
elif self.objtype == 'data':
return _('%s (global variable or constant)') % name
elif self.objtype == 'attribute':
return _('%s (%s attribute)') % (name, obj)
return ''
class JSCallable(JSObject):
"""Description of a JavaScript function, method or constructor."""
has_arguments = True
doc_field_types = [
TypedField('arguments', label=l_('Arguments'),
names=('argument', 'arg', 'parameter', 'param'),
typerolename='func', typenames=('paramtype', 'type')),
GroupedField('errors', label=l_('Throws'), rolename='err',
names=('throws', ),
can_collapse=True),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('returntype', label=l_('Return type'), has_arg=False,
names=('rtype',)),
]
class JSConstructor(JSCallable):
"""Like a callable but with a different prefix."""
display_prefix = 'class '
class JSXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# basically what sphinx.domains.python.PyXRefRole does
refnode['js:object'] = env.temp_data.get('js:object')
if not has_explicit_title:
title = title.lstrip('.')
target = target.lstrip('~')
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot+1:]
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True
return title, target
class JavaScriptDomain(Domain):
"""JavaScript language domain."""
name = 'js'
label = 'JavaScript'
    # if you add a new object type make sure to edit JSObject.get_index_text
object_types = {
'function': ObjType(l_('function'), 'func'),
'class': ObjType(l_('class'), 'class'),
'data': ObjType(l_('data'), 'data'),
'attribute': ObjType(l_('attribute'), 'attr'),
}
directives = {
'function': JSCallable,
'class': JSConstructor,
'data': JSObject,
'attribute': JSObject,
}
roles = {
'func': JSXRefRole(fix_parens=True),
'class': JSXRefRole(fix_parens=True),
'data': JSXRefRole(),
'attr': JSXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
}
def clear_doc(self, docname):
for fullname, (fn, _) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
def find_obj(self, env, obj, name, typ, searchorder=0):
if name[-2:] == '()':
name = name[:-2]
objects = self.data['objects']
newname = None
if searchorder == 1:
if obj and obj + '.' + name in objects:
newname = obj + '.' + name
else:
newname = name
else:
if name in objects:
newname = name
elif obj and obj + '.' + name in objects:
newname = obj + '.' + name
return newname, objects.get(newname)
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
objectname = node.get('js:object')
searchorder = node.hasattr('refspecific') and 1 or 0
name, obj = self.find_obj(env, objectname, target, typ, searchorder)
if not obj:
return None
return make_refnode(builder, fromdocname, obj[0],
name.replace('$', '_S_'), contnode, name)
def get_objects(self):
for refname, (docname, type) in self.data['objects'].iteritems():
yield refname, refname, type, docname, \
refname.replace('$', '_S_'), 1
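# A minimal usage sketch: in stock Sphinx this domain ships as a built-in, but
# a copy of it could be registered from an extension's setup() via
# app.add_domain() and then used from reST with the directives and roles
# declared above, e.g.:
#
#     def setup(app):
#         app.add_domain(JavaScriptDomain)
#
#     .. js:function:: $.getJSON(href, callback)
#
#        :param href: URL to request
#        :param callback: handler for the parsed response
#
#     See :js:func:`$.getJSON` for details.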
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ClinicalTrial.full_title'
db.delete_column('arkestra_clinical_trials_clinicaltrial', 'full_title')
# Adding field 'ClinicalTrial.expanded_title'
db.add_column('arkestra_clinical_trials_clinicaltrial', 'expanded_title',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'ClinicalTrial.full_title'
db.add_column('arkestra_clinical_trials_clinicaltrial', 'full_title',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Deleting field 'ClinicalTrial.expanded_title'
db.delete_column('arkestra_clinical_trials_clinicaltrial', 'expanded_title')
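    # Note: the delete/add pair above discards any existing full_title values.
    # If a straight rename was intended, a sketch using South's
    # db.rename_column (which preserves the column data) would look like:
    #
    #     def forwards(self, orm):
    #         db.rename_column('arkestra_clinical_trials_clinicaltrial',
    #                          'full_title', 'expanded_title')
    #
    #     def backwards(self, orm):
    #         db.rename_column('arkestra_clinical_trials_clinicaltrial',
    #                          'expanded_title', 'full_title')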
models = {
'arkestra_clinical_trials.clinicaltrial': {
'Meta': {'object_name': 'ClinicalTrial'},
'body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'chief_investigators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_chief_investigators'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Person']"}),
'clinical_centre': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_clinical_centres'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expanded_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'clinicaltrial_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'funding_body': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_funding_bodies'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'grant_value': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hosted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clinicaltrial_hosted_events'", 'on_delete': 'models.SET_DEFAULT', 'default': 'None', 'to': "orm['contacts_and_people.Entity']", 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'importance': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'in_lists': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'please_contact': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_person'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Person']"}),
'publish_to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_publish_to'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_sponsors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'setup'", 'max_length': '25'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'trialtype': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_trialtype'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['arkestra_clinical_trials.ClinicalTrialType']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'arkestra_clinical_trials.clinicaltrialentity': {
'Meta': {'object_name': 'ClinicalTrialEntity'},
'clinical_trials_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clinical_trials_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'entity': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'clinical_trial_entity'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'menu_title': ('django.db.models.fields.CharField', [], {'default': "'Clinical trials'", 'max_length': '50'}),
'publish_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'arkestra_clinical_trials.clinicaltrialtype': {
'Meta': {'object_name': 'ClinicalTrialType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'page_flags': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contacts_and_people.building': {
'Meta': {'ordering': "('site', 'street', 'number', 'name')", 'object_name': 'Building'},
'access_and_parking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_access_and_parking'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'additional_street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_description'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'getting_here': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'getting_here'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'on_delete': 'models.PROTECT', 'to': "orm['contacts_and_people.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '256'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'default': '17', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entity': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Entity', '_ormbases': ['contacts_and_people.EntityLite']},
'abstract_entity': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auto_contacts_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_news_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_publications_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_vacancies_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'building_recapitulates_entity_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contacts_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'contacts_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Contacts & people'", 'max_length': '50'}),
'display_parent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entitylite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.EntityLite']", 'unique': 'True', 'primary_key': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'news_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'news_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'News & events'", 'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publications_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Publications'", 'max_length': '50'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'vacancies_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vacancies_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'vacancies_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Vacancies & studentships'", 'max_length': '50'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entity'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['cms.Page']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.entitylite': {
'Meta': {'object_name': 'EntityLite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts_and_people.membership': {
'Meta': {'ordering': "('-importance_to_entity', 'person__surname')", 'object_name': 'Membership'},
'display_role': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'display_roles'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Membership']"}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['contacts_and_people.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance_to_entity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'importance_to_person': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'key_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_of'", 'to': "orm['contacts_and_people.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.person': {
'Meta': {'ordering': "['surname', 'given_name', 'user']", 'object_name': 'Person', '_ormbases': ['contacts_and_people.PersonLite']},
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'data_feed_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'people'", 'to': "orm['contacts_and_people.Entity']", 'through': "orm['contacts_and_people.Membership']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'institutional_username': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'override_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people_override'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Entity']"}),
'personlite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.PersonLite']", 'unique': 'True', 'primary_key': 'True'}),
'please_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Person']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'person_user'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.personlite': {
'Meta': {'object_name': 'PersonLite'},
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middle_names': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Title']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'contacts_and_people.phonecontact': {
'Meta': {'ordering': "('label',)", 'object_name': 'PhoneContact'},
'area_code': ('django.db.models.fields.CharField', [], {'default': "'029'", 'max_length': '5'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'44'", 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_extension': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.site': {
'Meta': {'ordering': "('country', 'site_name', 'post_town')", 'object_name': 'Site'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_town': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contacts_and_people.title': {
'Meta': {'ordering': "['title']", 'object_name': 'Title'},
'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'links.externallink': {
'Meta': {'ordering': "['title']", 'object_name': 'ExternalLink'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'external_site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalSite']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['links.LinkType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'links.externalsite': {
'Meta': {'ordering': "['domain']", 'object_name': 'ExternalSite'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'scheme': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['arkestra_clinical_trials']
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.services.types import asset_set_asset_service
from .base import AssetSetAssetServiceTransport, DEFAULT_CLIENT_INFO
class AssetSetAssetServiceGrpcTransport(AssetSetAssetServiceTransport):
"""gRPC backend transport for AssetSetAssetService.
    Service to manage asset set assets.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_asset_set_assets(
self,
) -> Callable[
[asset_set_asset_service.MutateAssetSetAssetsRequest],
asset_set_asset_service.MutateAssetSetAssetsResponse,
]:
r"""Return a callable for the mutate asset set assets method over gRPC.
Creates, updates or removes asset set assets.
Operation statuses are returned.
Returns:
Callable[[~.MutateAssetSetAssetsRequest],
~.MutateAssetSetAssetsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_asset_set_assets" not in self._stubs:
self._stubs[
"mutate_asset_set_assets"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.AssetSetAssetService/MutateAssetSetAssets",
request_serializer=asset_set_asset_service.MutateAssetSetAssetsRequest.serialize,
response_deserializer=asset_set_asset_service.MutateAssetSetAssetsResponse.deserialize,
)
return self._stubs["mutate_asset_set_assets"]
__all__ = ("AssetSetAssetServiceGrpcTransport",)
|
|
# -*- coding: utf-8 -*-
import logging
import traceback
import sys
from graphic_utils import DynamicBackground, \
ScreenObjectsManager, TouchAndTextItem
from input import InputManager
import mopidy
from pkg_resources import Requirement, resource_filename
import pygame
import pdb
# StreamsScreen, SystemScreen, NowPlayingScreen
from screens import BaseScreen, Keyboard, BrowseScreen, MainScreen,\
PlaylistScreen, QueueScreen, SearchScreen, MenuScreen
logger = logging.getLogger(__name__)
mainScreenIndex = 0
#nowPlayingIndex = 1
#queueIndex = 2
#playlistsIndex = 3
#browseIndex = 4
#streamsIndex = 5
#searchIndex = 6
#SystemIndex = 7
#MenuIndex = 8
queueIndex = 1
playlistsIndex = 2
browseIndex = 3
searchIndex = 4
MenuIndex = 5
class ScreenManager():
def __init__(self, size, core, cache, resolution_factor):
self.core = core
self.cache = cache
self.fonts = {}
self.background = None
self.currentScreen = mainScreenIndex
# Init variables in init
self.baseSize = None
self.size = None
self.screens = None
self.track = None
self.input_manager = InputManager(size)
self.down_bar_objects = ScreenObjectsManager()
self.down_bar = None
self.keyboard = None
self.updateType = BaseScreen.update_all
self.resolution_factor = resolution_factor
self.init_manager(size)
def init_manager(self, size):
self.size = size
self.baseSize = self.size[1] / self.resolution_factor
self.background = DynamicBackground(self.size)
font = resource_filename(Requirement.parse("mopidy-nowplayingtouch"), "mopidy_nowplayingtouch/FontAwesome.otf")
self.fonts['base'] = pygame.font.SysFont("arial", int(self.baseSize*0.9))
self.fonts['icon'] = pygame.font.Font(font, int(self.baseSize*0.9))
try:
self.screens = [
MainScreen(size, self.baseSize, self, self.fonts, self.cache, self.core, self.background),
QueueScreen(size, self.baseSize, self, self.fonts),
PlaylistScreen(size, self.baseSize, self, self.fonts),
BrowseScreen(size, self.baseSize, self, self.fonts),
SearchScreen(size, self.baseSize, self, self.fonts),
MenuScreen(size, self.baseSize, self, self.fonts, self.core)]
except:
traceback.print_exc()
self.track = None
# Menu buttons
button_size = (self.size[0] / 6, self.baseSize)
# Main button
button = TouchAndTextItem(self.fonts['icon'], u" \ue001",
(0, self.size[1] - self.baseSize),
button_size, center=True)
self.down_bar_objects.set_touch_object("menu_0", button)
x = button.get_right_pos()
# Search button
button = TouchAndTextItem(self.fonts['icon'], u" \ue002",
(x, self.size[1] - self.baseSize),
button_size, center=True)
self.down_bar_objects.set_touch_object("menu_1", button)
x = button.get_right_pos()
# Menu button
button = TouchAndTextItem(self.fonts['icon'], u" \ue60a",
(x, self.size[1] - self.baseSize),
button_size,
center=True)
self.down_bar_objects.set_touch_object("menu_5", button)
# Down bar
self.down_bar = pygame.Surface(
(self.size[0], self.size[1] - self.baseSize),
pygame.SRCALPHA)
self.down_bar.fill((0, 0, 0, 200))
self.options_changed()
self.mute_changed(self.core.playback.mute.get())
playback_state = self.core.playback.state.get()
self.playback_state_changed(playback_state, playback_state)
self.screens[MenuIndex].check_connection()
self.change_screen(self.currentScreen)
self.updateType = BaseScreen.update_all
def get_update_type(self):
if self.updateType == BaseScreen.update_all:
self.updateType = BaseScreen.no_update
return BaseScreen.update_all
else:
if self.keyboard:
return BaseScreen.no_update
else:
if self.background.should_update():
return BaseScreen.update_all
else:
if self.screens[self.currentScreen].should_update():
return BaseScreen.update_partial
else:
return BaseScreen.no_update
def update(self, screen):
update_type = self.get_update_type()
if update_type != BaseScreen.no_update:
rects = []
surface = self.background.draw_background()
if self.keyboard:
self.keyboard.update(surface)
else:
self.screens[self.currentScreen].update(surface, update_type, rects)
surface.blit(self.down_bar, (0, self.size[1] - self.baseSize))
self.down_bar_objects.render(surface)
if update_type == BaseScreen.update_all or len(rects) < 1:
screen.blit(surface, (0, 0))
pygame.display.flip()
else:
for rect in rects:
screen.blit(surface, rect, area=rect)
pygame.display.update(rects)
    def track_started(self, track):
        self.track = track
        # Only MainScreen handles now-playing info here; the dedicated
        # NowPlaying screen (nowPlayingIndex) is disabled above.
        self.screens[mainScreenIndex].track_started(track.track)
def track_playback_ended(self, tl_track, time_position):
self.screens[mainScreenIndex].track_playback_ended(tl_track, time_position)
def event(self, event):
event = self.input_manager.event(event)
if event is not None:
if self.keyboard is not None:
self.keyboard.touch_event(event)
elif not self.manage_event(event):
self.screens[self.currentScreen].touch_event(event)
self.updateType = BaseScreen.update_all
def manage_event(self, event):
if event.type == InputManager.click:
objects = self.down_bar_objects.get_touch_objects_in_pos(event.current_pos)
return self.click_on_objects(objects, event)
else:
if event.type == InputManager.key and not event.longpress:
dir = event.direction
if dir == InputManager.right or dir == InputManager.left:
if not self.screens[self.currentScreen]\
.change_screen(dir):
if dir == InputManager.right:
self.change_screen(self.currentScreen+1)
else:
self.change_screen(self.currentScreen-1)
return True
elif event.unicode is not None:
if event.unicode == "n":
self.core.playback.next()
elif event.unicode == "p":
self.core.playback.previous()
elif event.unicode == "+":
volume = self.core.playback.volume.get() + 10
if volume > 100:
volume = 100
self.core.mixer.set_volume(volume)
elif event.unicode == "-":
volume = self.core.playback.volume.get() - 10
if volume < 0:
volume = 0
self.core.mixer.set_volume(volume)
elif event.unicode == " ":
if self.core.playback.get_state().get() == \
mopidy.core.PlaybackState.PLAYING:
self.core.playback.pause()
else:
self.core.playback.play()
return False
def volume_changed(self, volume):
self.screens[mainScreenIndex].volume_changed(volume)
self.updateType = BaseScreen.update_all
def playback_state_changed(self, old_state, new_state):
self.screens[mainScreenIndex].playback_state_changed(old_state, new_state)
self.updateType = BaseScreen.update_all
def mute_changed(self, mute):
self.screens[mainScreenIndex].mute_changed(mute)
self.updateType = BaseScreen.update_all
def resize(self, event):
self.init_manager(event.size)
self.updateType = BaseScreen.update_all
    def tracklist_changed(self):
        # The dedicated NowPlaying screen (nowPlayingIndex) is disabled above,
        # so just force a full redraw.
        self.updateType = BaseScreen.update_all
def options_changed(self):
menuScreen = self.screens[MenuIndex]
#self.screens[MenuIndex].options_changed()
menuScreen.options_changed()
self.updateType = BaseScreen.update_all
def change_screen(self, new_screen):
if new_screen > -1 and new_screen < len(self.screens):
self.down_bar_objects.get_touch_object(
"menu_" + str(self.currentScreen)).set_active(False)
self.currentScreen = new_screen
self.down_bar_objects.get_touch_object(
"menu_" + str(new_screen)).set_active(True)
self.updateType = BaseScreen.update_all
def click_on_objects(self, objects, event):
if objects is not None:
for key in objects:
if key[:-1] == "menu_":
self.change_screen(int(key[-1:]))
return True
return False
    def nowPlaying(self):
        # The dedicated NowPlaying screen (nowPlayingIndex) is disabled above,
        # so just force a full redraw.
        self.updateType = BaseScreen.update_all
def queue(self):
self.screens[queueIndex].queue_changed()
self.updateType = BaseScreen.update_all
def playlists(self):
self.screens[playlistsIndex].playlists_changed()
self.updateType = BaseScreen.update_all
def browse(self):
self.screens[browseIndex].browse_changed()
self.updateType = BaseScreen.update_all
    def stream(self):
        # The Streams screen (streamsIndex) is disabled above, so just force a
        # full redraw.
        self.updateType = BaseScreen.update_all
def stream_title_changed(self, title):
self.screens[mainScreenIndex].stream_title_changed(title)
self.updateType = BaseScreen.update_all
def search(self, query, mode):
self.screens[searchIndex].search(query, mode)
self.updateType = BaseScreen.update_all
    def system(self):
        # The System screen (SystemIndex) is disabled above, so just force a
        # full redraw.
        self.updateType = BaseScreen.update_all
def open_keyboard(self, input_listener):
self.keyboard = Keyboard(self.size, self.baseSize, self,
self.fonts, input_listener)
self.updateType = BaseScreen.update_all
def close_keyboard(self):
self.keyboard = None
self.updateType = BaseScreen.update_all
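# A minimal sketch of how a pygame-based frontend might drive ScreenManager
# (core, cache and the resolution factor are placeholders supplied by the
# Mopidy extension that owns this class):
#
#     pygame.init()
#     size = (320, 240)
#     screen = pygame.display.set_mode(size, pygame.RESIZABLE)
#     manager = ScreenManager(size, core, cache, resolution_factor=8)
#     running = True
#     while running:
#         for ev in pygame.event.get():
#             if ev.type == pygame.QUIT:
#                 running = False
#             elif ev.type == pygame.VIDEORESIZE:
#                 manager.resize(ev)
#             else:
#                 manager.event(ev)
#         manager.update(screen)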
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train an MLP on MNIST using K-FAC.
This library fits a 3-layer, tanh-activated MLP on MNIST using K-FAC. After
~25k steps, this should reach perfect accuracy on the training set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.kfac.examples import mnist
lc = tf.contrib.kfac.layer_collection
opt = tf.contrib.kfac.optimizer
__all__ = [
"fc_layer",
"train_mnist",
"train_mnist_multitower",
]
def fc_layer(layer_id, inputs, output_size):
"""Builds a fully connected layer.
Args:
layer_id: int. Integer ID for this layer's variables.
inputs: Tensor of shape [num_examples, input_size]. Each row corresponds
to a single example.
output_size: int. Number of output dimensions after fully connected layer.
Returns:
preactivations: Tensor of shape [num_examples, output_size]. Values of the
layer immediately before the activation function.
activations: Tensor of shape [num_examples, output_size]. Values of the
layer immediately after the activation function.
params: Tuple of (weights, bias), parameters for this layer.
"""
# TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
layer = tf.layers.Dense(
output_size,
kernel_initializer=tf.random_normal_initializer(),
name="fc_%d" % layer_id)
preactivations = layer(inputs)
activations = tf.nn.tanh(preactivations)
  # layer.weights is a list. This converts it to a (hashable) tuple.
return preactivations, activations, (layer.kernel, layer.bias)
def build_model(examples, labels, num_labels, layer_collection):
"""Builds an MLP classification model.
Args:
examples: Tensor of shape [num_examples, num_features]. Represents inputs of
model.
labels: Tensor of shape [num_examples]. Contains integer IDs to be predicted
by softmax for each example.
num_labels: int. Number of distinct values 'labels' can take on.
layer_collection: LayerCollection instance describing model architecture.
Returns:
loss: 0-D Tensor representing loss to be minimized.
accuracy: 0-D Tensor representing model's accuracy.
"""
# Build an MLP. For each layer, we'll keep track of the preactivations,
# activations, weights, and bias.
pre0, act0, params0 = fc_layer(layer_id=0, inputs=examples, output_size=128)
pre1, act1, params1 = fc_layer(layer_id=1, inputs=act0, output_size=64)
pre2, act2, params2 = fc_layer(layer_id=2, inputs=act1, output_size=32)
logits, _, params3 = fc_layer(layer_id=3, inputs=act2, output_size=num_labels)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits))
accuracy = tf.reduce_mean(
tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32))
# Register parameters. K-FAC needs to know about the inputs, outputs, and
# parameters of each layer and the logits powering the posterior probability
# over classes.
tf.logging.info("Building LayerCollection.")
layer_collection.register_fully_connected(params0, examples, pre0)
layer_collection.register_fully_connected(params1, act0, pre1)
layer_collection.register_fully_connected(params2, act1, pre2)
layer_collection.register_fully_connected(params3, act2, logits)
layer_collection.register_categorical_predictive_distribution(
logits, name="logits")
return loss, accuracy
def minimize(loss, accuracy, layer_collection, session_config=None):
"""Minimize 'loss' with KfacOptimizer.
Args:
loss: 0-D Tensor. Loss to be minimized.
accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
layer_collection: LayerCollection instance. Describes layers in model.
session_config: tf.ConfigProto. Configuration for tf.Session().
Returns:
accuracy of classifier on final minibatch.
"""
# Train with K-FAC. We'll use a decreasing learning rate that's cut in 1/2
# every 10k iterations.
tf.logging.info("Building KFAC Optimizer.")
global_step = tf.train.get_or_create_global_step()
optimizer = opt.KfacOptimizer(
learning_rate=tf.train.exponential_decay(
0.00002, global_step, 10000, 0.5, staircase=True),
cov_ema_decay=0.95,
damping=0.0001,
layer_collection=layer_collection,
momentum=0.99)
train_op = optimizer.minimize(loss, global_step=global_step)
tf.logging.info("Starting training.")
with tf.train.MonitoredTrainingSession(config=session_config) as sess:
while not sess.should_stop():
      # K-FAC has 3 primary ops:
# - train_op: Update the weights with the minibatch's gradient.
# - cov_update_op: Update statistics used for building K-FAC's
# preconditioner matrix.
# - inv_update_op: Update preconditioner matrix using statistics.
#
# The first 2 of these are cheap and should be done with each step. The
      # latter is more expensive, and should only be run every ~100 iterations.
global_step_, loss_, accuracy_, _, _ = sess.run(
[global_step, loss, accuracy, train_op, optimizer.cov_update_op])
if global_step_ % 100 == 0:
sess.run(optimizer.inv_update_op)
if global_step_ % 100 == 0:
tf.logging.info("global_step: %d | loss: %f | accuracy: %f",
global_step_, loss_, accuracy_)
return accuracy_
def train_mnist(data_dir, num_epochs, use_fake_data=False):
"""Train an MLP on MNIST.
Args:
data_dir: string. Directory to read MNIST examples from.
num_epochs: int. Number of passes to make over the training set.
use_fake_data: bool. If True, generate a synthetic dataset.
Returns:
accuracy of model on the final minibatch of training data.
"""
# Load a dataset.
tf.logging.info("Loading MNIST into memory.")
examples, labels = mnist.load_mnist(
data_dir,
num_epochs=num_epochs,
batch_size=64,
flatten_images=True,
use_fake_data=use_fake_data)
# Build an MLP. The model's layers will be added to the LayerCollection.
tf.logging.info("Building model.")
layer_collection = lc.LayerCollection()
loss, accuracy = build_model(examples, labels, 10, layer_collection)
# Fit model.
  return minimize(loss, accuracy, layer_collection)
def train_mnist_multitower(data_dir,
num_epochs,
num_towers,
use_fake_data=False):
"""Train an MLP on MNIST, splitting the minibatch across multiple towers.
Args:
data_dir: string. Directory to read MNIST examples from.
num_epochs: int. Number of passes to make over the training set.
num_towers: int. Number of CPUs to split minibatch across.
use_fake_data: bool. If True, generate a synthetic dataset.
Returns:
accuracy of model on the final minibatch of training data.
"""
# Load a dataset.
tower_batch_size = 64
batch_size = tower_batch_size * num_towers
tf.logging.info(
("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
"tower batch size.") % (batch_size, num_towers, tower_batch_size))
examples, labels = mnist.load_mnist(
data_dir,
num_epochs=num_epochs,
batch_size=batch_size,
flatten_images=True,
use_fake_data=use_fake_data)
# Split minibatch across towers.
examples = tf.split(examples, num_towers)
labels = tf.split(labels, num_towers)
# Build an MLP. Each tower's layers will be added to the LayerCollection.
layer_collection = lc.LayerCollection()
tower_results = []
for tower_id in range(num_towers):
with tf.device("/cpu:%d" % tower_id):
with tf.name_scope("tower%d" % tower_id):
with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
tf.logging.info("Building tower %d." % tower_id)
tower_results.append(
build_model(examples[tower_id], labels[tower_id], 10,
layer_collection))
losses, accuracies = zip(*tower_results)
# Average across towers.
loss = tf.reduce_mean(losses)
accuracy = tf.reduce_mean(accuracies)
# Fit model.
session_config = tf.ConfigProto(
allow_soft_placement=False, device_count={
"CPU": num_towers
})
return minimize(
loss, accuracy, layer_collection, session_config=session_config)
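# Hedged usage sketch (not part of the original example): one way train_mnist
# might be invoked directly. The data directory below is an assumption; with
# use_fake_data=True a synthetic dataset is generated instead of real MNIST.
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  train_mnist(data_dir="/tmp/mnist", num_epochs=1, use_fake_data=True)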
|
|
#! /usr/bin/env python
#
# Copyright (C) 2007-2014 Rich Lewis <rl403@cam.ac.uk>
# License: MIT
# -*- coding: utf-8 -*-
# pylint: disable=too-many-instance-attributes,too-many-arguments
""" Module implementing the creation of synergy map objects. """
from collections import defaultdict
from rdkit.Chem.Draw import MolToFile
import pandas as pd
import networkx as nx
import json
import os
class SynergyMap(object):
"""A synergy map class created from combination and chemical data.
Generates a JSON file required for the web app to display the map.
"""
def __init__(self, compound_df, combination_df,
activity_types, synergy_types,
representation_types, reduction_types,
metadata):
"""Create a synergy map object.
Args:
compound_df (pandas.DataFrame): compound scikit-chem dataframe
combination_df (pandas.DataFrame): combination dataframe
activity_types (list): List of activity types
The list of activity types that are present in the
compound dataframe
synergy_types (list): list of synergy types
A list of synergy types that are present in the
combination dataframe
            representation_types (list): a list of representation types to use
                A list of representation types to use to generate the maps
            reduction_types (list): a list of reduction methods to use
                A list of reduction methods to use to generate the maps
metadata (string): description of the dataset
A string description of the dataset.
"""
self.compounds = compound_df
self.combinations = combination_df
self.representation_types = representation_types
self.reduction_types = reduction_types
self.activity_types = activity_types
self.synergy_types = synergy_types
self.metadata = metadata
self.generate_coordinates()
self.generate_graph()
self.generate_metadata()
#self.generate_comp_svgs()
def generate_metadata(self):
"""Draw the metadata out of all objects, to make single metadata object
Returns:
dict: dictionary for the different types of metadata.
"""
self.dataset_metadata = {
'representationTypes': [rep.to_dict() for rep in self.representation_types],
'dimensionalityReductionTypes': [red.to_dict() for red in self.reduction_types],
'activityTypes': [act.to_dict() for act in self.activity_types],
'synergyTypes': [syn.to_dict() for syn in self.synergy_types],
'dataset': self.metadata
}
return self.dataset_metadata
def generate_coordinates(self):
""" calculate coordinates to use in the synergy map
Iterate through every combination of representation type and
reduction method, applying them and saving the resultant dataframes
in the coordinates dictionary.
Returns:
dict: multi level dict of coordinates
dict -> rep -> red -> (x, y)
"""
self.coordinates = defaultdict(dict)
for rep in self.representation_types:
for red in self.reduction_types:
self.coordinates[rep.name][red.name] = red(rep(self.compounds))
def generate_graph(self):
"""create networkX graph for the dataset
Returns:
graph (networkx.Graph) the graph object for the dataset."""
graph = nx.Graph()
graph.add_nodes_from(
(idx, row) for (idx, row) in self.compounds.iterrows())
graph.add_edges_from(
(rows['ColId'], rows['RowId'], rows.drop(['ColId', 'RowId']))
for (idx, rows) in self.combinations.iterrows())
return graph
def generate_comp_svgs(self):
"""Create SVG images of the compounds.
The images are inserted directly into the images directory
of the frontend.
Notes:
It is expected that the user has installed an svg capable
renderer for rdkit. See http://www.rdkit.org for details.
Returns:
None
"""
structure_path = os.path.join(
os.path.dirname(__file__),
'../../frontend/app/data/images'
)
self.compounds.apply(
lambda r: MolToFile(
r.structure,
os.path.join(
structure_path,
'{}-{}.svg'.format(r.name, r['name'])
)
),
axis=1
)
def to_json(self):
"""Generate a JSON representation from the constructed Synergy Map.
Returns:
str: a string containing the json.
"""
coords = json.loads(pd.json.dumps(self.coordinates, orient='index'))
combs = self.combinations.reset_index().to_dict(orient='records')
syn_types = [s.name for s in self.synergy_types]
new_combs = []
for comb in combs:
new_comb = {}
new_comb['id'] = comb['id']
new_comb['RowId'] = comb['RowId']
new_comb['ColId'] = comb['ColId']
new_comb['synergies'] = {
k: v for k, v in comb.iteritems() if k in syn_types}
new_comb['properties'] = {
k: v for k, v in comb.iteritems() if k not in syn_types +
['id', 'RowId', 'ColId']}
new_combs.append(new_comb)
comps = self.compounds.drop('structure', axis=1)\
.reset_index()\
.to_dict(orient='records')
act_types = [a.name for a in self.activity_types]
new_comps = []
for comp in comps:
new_comp = {}
new_comp['id'] = comp['id']
new_comp['name'] = comp['name']
new_comp['activities'] = {
k: v for k, v in comp.iteritems() if k in act_types}
new_comp['properties'] = {
k: v for k, v in comp.iteritems() if k not in act_types +
['id', 'name']}
new_comps.append(new_comp)
dataset = {
'compounds': new_comps,
'combinations': new_combs,
'coordinates': coords,
'metadata': self.dataset_metadata
}
return json.dumps(dataset)
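# Hedged illustration (not part of the original module): the *_types arguments
# passed to SynergyMap are expected to expose a ``name`` attribute and a
# ``to_dict()`` method (see generate_metadata); representation and reduction
# types must additionally be callable on a dataframe (see generate_coordinates).
# The stub below only sketches that inferred interface and is not a real
# representation type.
class _ExampleRepresentationType(object):
    """Illustrative stand-in showing the interface SynergyMap relies on."""
    name = 'identity'
    def to_dict(self):
        return {'name': self.name}
    def __call__(self, compound_df):
        # A real representation would compute features from the compounds;
        # the identity mapping is used here purely for illustration.
        return compound_df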
|
|
from __future__ import print_function
import yaml
import sys, os, time
import shlex, shutil
import tarfile, gzip, StringIO
from . import checkout
from xmlrpclib import Binary
import logging
log = logging.getLogger(__name__)
VALID_TYPES = {"abstract", "shared", "builder", "default"}
# Allow specfiles to know where the metadata is located
if not "RAILGUN_PACKAGE" in os.environ:
os.environ["RAILGUN_PACKAGE"] = os.path.dirname(__file__)
if not "RAILGUN_FILES" in os.environ:
f = os.path.join(os.path.dirname(__file__), "..", "meta")
if os.path.isdir(f):
os.environ["RAILGUN_FILES"] = os.path.abspath(f)
else:
f = os.path.join(sys.prefix, "railgun")
if os.path.isdir(f):
os.environ["RAILGUN_FILES"] = os.path.abspath(f)
def get_builders_dir():
return os.path.join(os.path.dirname(__file__), '..', 'meta', 'builders')
def get_services_dir():
return os.path.join(os.path.dirname(__file__), '..', 'meta', 'services')
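# Hedged illustration of the specfile layout this module parses (inferred from
# Project.__init__ and Service._build; the service name, builder, URL and
# paths below are examples only, not part of this module):
#
#   webapp:
#     type: default
#     builder: python
#     namespace: examples
#     container:
#       url: https://example.com/webapp.git       # checked out; must contain a Dockerfile
#       subdirectory: docker
#       files:
#         settings.py: $HOME/webapp/settings.py   # added to the build context as 'settings.py'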
class Project(object):
"Represents a specfile (can hold multiple services)"
services = None
filename = None
raw_docker = False
name = None
def __init__(self, source=None):
self.services = []
if not source: source = "."
if os.path.isdir(source):
self.name = os.path.basename(source)
if os.path.exists(os.path.join(source, "services.yml")):
source = os.path.join(source, "services.yml")
elif os.path.exists(os.path.join(source, "Dockerfile")):
self.raw_docker = True
else:
raise ValueError("Directory is not a Railgun project (nor contains a Dockerfile)")
elif not os.path.exists(source):
if not source.endswith(".yml"):
source = "%s.yml" % source
if not os.path.exists(source):
raise ValueError("File '%s' does not exist" % source)
self.name = os.path.splitext(os.path.basename(source))[0]
else:
self.name = os.path.splitext(os.path.basename(source))[0]
if self.raw_docker:
# A Dockerfile-based spec
self.filename = os.path.join(source, "Dockerfile")
name = os.path.basename(source)
self.services.append(Service(name, {'builder':None}, self))
else:
# A services.yml-based spec
self.filename = source
self.root = yaml.load(file(source))
if not isinstance(self.root, dict):
self._fail("Invalid specfile, expected objects at root level")
for n,inst in self.root.items():
self.services.append(Service(n, inst, self))
self.validate()
def push_to(self, host, push_dependencies=False, force_update=False, no_cache=False):
for service in self.services:
service.push_to(host, force_update=force_update, no_cache=no_cache)
def get_service(self, svc):
for service in self.services:
if service.name == svc:
return service
raise KeyError("Service '%s' not defined" % svc)
def __repr__(self):
return yaml.dump(self.as_dict())
def as_dict(self):
d = {}
for service in self.services:
d[service.name] = service.as_dict()
return d
def validate(self):
names = set()
for service in self.services:
nm = service.qualified_name()
if nm in names:
self._fail("Duplicate service name '%s'" % nm)
names.add(nm)
service.validate()
def _fail(self, message):
if self.filename:
message = "%s: %s" % (self.filename, message)
raise ValueError(message)
class Service(object):
"Represents a service"
__root = None
project = None
name = None
resolved = False
def __init__(self, name, root, parent):
self.__root = root
self.name = name
self.project = parent
def validate(self):
self.type()
def qualified_name(self, short_name=False):
q = self.__root.get("namespace")
nm = self.name
if self.type() == "builder" and not nm.startswith("builder."):
nm = "builder.%s" % nm
if q and not short_name:
return "%s.%s" % (nm, q)
return nm
def container_name(self):
return self.qualified_name()
def get_builder(self):
"Returns the logical name of the builder to use"
builder = self.__root.get("builder", "base")
if checkout.is_url_or_file(builder):
return builder
if not builder.startswith("builder."):
builder = "builder.%s" % builder
return builder
def update_dependencies(self, host, dependencies=False, force_update=False):
if dependencies:
# TODO
pass
def update_prerequisite(self, reference, host, dependencies=False, force_update=True, no_cache=False, stack=None):
if reference in ("builder.none",):
return reference
if stack is None:
stack = [(os.path.realpath(self.project.filename),self.name)]
log.info("Updating prerequisite '%s'" % reference)
service = None
if checkout.is_url_or_file(reference):
dest = self.checkout(reference)
log.info("Checked out dependency URL %s to %s" % (reference, dest))
svc = None
# Explicit reference name
if '#' in reference:
svc = reference.split('#')[-1]
proj = Project(dest)
if svc:
service = proj.get_service(svc)
else:
if len(proj.services) != 1:
raise ValueError("Ambiguous service reference in '%s'" % reference)
service = proj.services[0]
ent = (os.path.realpath(proj.filename), service.name)
if ent in stack:
raise Exception("Cyclic dependency towards '%s'" % ent)
stack.append(ent)
service.push_to(host, None, dependencies, force_update=force_update, no_cache=no_cache, stack=stack)
reference = service.qualified_name()
else:
log.debug("Checking service by logical name '%s'" % reference)
if force_update or not host.check_container_exists(reference):
log.info("Updating dependency/builder container '%s'" % reference)
if not service:
# Try to locate service source
# First in project
for svc in self.project.services:
                    if svc.qualified_name() == reference or svc.qualified_name(True) == reference:
service = svc
break
# Then in builtins
if reference.startswith("builder."):
pfxdir = get_builders_dir()
else:
pfxdir = get_services_dir()
for f in os.listdir(pfxdir):
if service: break
try:
proj = Project(os.path.join(pfxdir, f))
except:
continue
                    for svc in proj.services:
                        if svc.qualified_name() == reference or svc.qualified_name(True) == reference:
service = svc
break
if not service:
raise Exception("Reference to service '%s' could not be resolved." % reference)
return reference
def push_to(self, host, name=None, push_dependencies=False, force_update=False, no_cache=False, stack=None):
if name is None:
services = host.get_service_instances(self.name)
if not services:
services = [self.name]
else:
services = [name]
builder = self.update_prerequisite(self.get_builder(), host, True, force_update, no_cache=no_cache)
log.debug("Will build %s using builder '%s'" % (', '.join(services), builder))
proc = host._build_service_container(builder, self.container_name(), no_cache=no_cache)
# Should we download remote files, or should we let the target do it?
remote = host.should_download_remote_files()
if builder == 'builder.none':
# Bootstrap builder cannot download files
remote = False
# Wrap the pipe, since tarfile insists on calling 'tell'
class TarPipeWrapper(object):
fo = None
def __init__(self, fo):
self.fo = fo
def tell(self):
return 0
def write(self, *a, **kw):
self.fo.write(*a, **kw)
# Package to process input
self.package(TarPipeWrapper(proc.stdin), remote=remote)
last=""
proc.stdin.close()
proc.wait()
if proc.returncode:
raise Exception("Container build failed. See error messages above")
for service in services:
d = os.path.dirname(self.project.filename)
#host.publish_files(d, post_cmd=["pwd", "ls -lh"])
#host.exec_shell("docker build")
def get_build_dir(self, create=True):
"Return the directory used for temporary build files, optionally creating it"
dirname = os.path.join(os.path.dirname(self.project.filename),
"build", self.project.name, self.name)
if create and not os.path.isdir(dirname):
os.makedirs(dirname)
return dirname
def is_pure_container(self):
"Returns True if the project can be transformed to a pure Dockerfile"
container = self.__root.get("container")
if container and container.get("url"):
# Needs to be transformed on host
if self.get_additional_dockerfile_instructions():
return False
return True
def get_projectfile(self):
if self.project.raw_docker:
raise Exception("Raw Dockerfile projects do not have project files")
return self.project.filename
def get_dockerfile(self, local=True, remote=True, path=None):
"Return the contents of the Dockerfile as a list of rows, or None if Dockerfile cannot be generated"
rootdir = os.path.dirname(self.project.filename)
if not path:
container = self.__root.get("container")
if container:
path = container.get("dockerfile")
if path:
if os.path.isdir(path):
path = os.path.join(path, "Dockerfile")
for line in file(os.path.join(rootdir, path)):
line = line.strip().split('#',1)[0]
if not line: continue
yield line
for line in self.get_additional_dockerfile_instructions():
if line: yield line
def get_dockerfile_referenced_files(self, dockerfile):
"Gets the local files referenced by the generated Dockerfile"
rootdir = os.path.dirname(self.project.filename)
for line in dockerfile:
args = shlex.split(line)
if args[0].lower() == 'add':
fn = args[1]
if '://' in fn: continue
fna = os.path.join(rootdir, fn)
fn = os.path.relpath(fna,rootdir)
                if not fn.startswith('../') and os.path.exists(fna):
yield fna
def get_additional_dockerfile_instructions(self):
if self.project.raw_docker:
return
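        # The unreachable yield below keeps this method a generator even when
        # it produces no instructions, so callers can always iterate over it.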
if False:
yield ''
return
def checkout(self, url, update_existing=False):
"Checks out an URL into the build directory for further processing"
rootdir = os.path.dirname(self.project.filename)
pth = checkout.get_local_file(url, rootdir)
if pth:
return pth
else:
scm = checkout.get_scm_provider(url)
if scm:
dest = scm.get_destination_name(url)
scm.checkout(url, self.get_build_dir(), update_existing=update_existing)
return os.path.join(self.get_build_dir(), dest)
else:
raise ValueError("Url '%s' not possible to check out" % url)
def package(self, outfile, update=False, local=True, remote=True):
"""
Download all local resources required to build this service and write a tar stream to the output file.
"""
log.debug("Packaging and streaming %s" % self.name)
with TarPackaging(outfile) as tar:
self._build(tar, update, local, remote, True)
log.debug("Packaged %s" % self.name)
def build(self, update=False, local=True, remote=True, write=False):
"""
Download all local/remote resources required to build this service and generate build instructions.
"""
log.debug("Building %s" % self.name)
dockerfile = write and not os.path.exists(os.path.join(self.get_directory(), "Dockerfile"))
self._build(LocalPackaging(self.get_directory(), write), update, local, remote, dockerfile)
def _build(self, packaging, update, local, remote, dockerfile):
def should_handle(url):
lcl = checkout.url_is_local(url)
return lcl and local or not lcl and remote
# Create mutable copy of service
resolved = dict(self.__root)
rootdir = os.path.dirname(self.project.filename)
container = resolved.get("container")
if container:
url = container.get("url")
if url and should_handle(url):
dest = self.checkout(url)
del container["url"]
if "subdirectory" in container:
dest = os.path.join(dest, container["subdirectory"])
del container["subdirectory"]
dockerfile = os.path.join(dest, "Dockerfile")
if not os.path.exists(dockerfile):
raise Exception("No Dockerfile found in %s (%s)" % (dest, url))
container["dockerfile"] = os.path.relpath(dockerfile, rootdir)
for name,src in container.get("files", dict()).items():
packaging.addmap(src, name)
df = list(self.get_dockerfile(local, remote))
if df and "dockerfile" in container:
packaging.addstr('\n'.join(df), 'Dockerfile')
packaging.addstr(yaml.dump({self.name:resolved}), 'services.yml')
# Add locally cached artifacts, if any
d = self.get_build_dir(False)
if os.path.isdir(d):
packaging.addrel(d, rootdir)
for f in self.get_dockerfile_referenced_files(df):
packaging.addrel(f, rootdir)
def get_directory(self):
return os.path.dirname(self.project.filename)
def type(self):
t = self.__root.get("type", "default")
if t in VALID_TYPES:
return t
else:
self._fail("Service type '%s' is not recognized" % t)
def is_abstract(self):
return self.type() == "abstract"
def as_dict(self):
return self.__root
def __repr__(self):
return yaml.dump(self.as_dict())
def _fail(self, message):
return self.project._fail(message)
class LocalPackaging:
def __init__(self, root, write):
self.root = root
self.write = write
def addrel(self, filename, rootdir):
arcname = os.path.relpath(filename, rootdir)
log.debug("[f] %s" % arcname)
return arcname
def addstr(self, string, filename):
log.debug("[g] %s:\n %s" % (filename, '\n '.join(string.split('\n'))))
class TarPackaging(tarfile.TarFile):
def __init__(self, outfile):
tarfile.TarFile.__init__(self, fileobj=outfile, mode='w')
def addrel(self, filename, rootdir):
arcname = os.path.relpath(filename, rootdir)
self.add(filename, arcname=arcname)
log.debug("[F] %s" % arcname)
return arcname
def addstr(self, string, filename):
tarinfo = tarfile.TarInfo(name=filename)
tarinfo.size = len(string)
tarinfo.mtime = time.time()
self.addfile(tarinfo=tarinfo, fileobj=StringIO.StringIO(string))
log.debug("[G] %s" % filename)
def addmap(self, src, arcname):
src = os.path.expandvars(src)
self.add(src, arcname=arcname)
log.debug("[M] %s (%s)" % (arcname, src))
if __name__ == '__main__':
    print(Project(sys.argv[1]))
|
|
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
from itertools import izip
import logging
import re
from net import bsonrpc
from net import gorpc
from vtdb import dbexceptions
from vtdb import field_types
from vtdb import vtdb_logger
_errno_pattern = re.compile(r'\(errno (\d+)\)')
def handle_app_error(exc_args):
msg = str(exc_args[0]).lower()
# Operational Error
if msg.startswith('retry'):
return dbexceptions.RetryError(exc_args)
if msg.startswith('fatal'):
return dbexceptions.FatalError(exc_args)
if msg.startswith('tx_pool_full'):
return dbexceptions.TxPoolFull(exc_args)
# Integrity and Database Error
match = _errno_pattern.search(msg)
if match:
# Prune the error message to truncate after the mysql errno, since
# the error message may contain the query string with bind variables.
mysql_errno = int(match.group(1))
if mysql_errno == 1062:
parts = _errno_pattern.split(msg)
pruned_msg = msg[:msg.find(parts[2])]
new_args = (pruned_msg,) + tuple(exc_args[1:])
return dbexceptions.IntegrityError(new_args)
# TODO(sougou/liguo): remove this case once servers are deployed
elif mysql_errno == 1290 and 'read-only' in msg:
return dbexceptions.RetryError(exc_args)
return dbexceptions.DatabaseError(exc_args)
def convert_exception(exc, *args):
new_args = exc.args + args
if isinstance(exc, gorpc.TimeoutError):
return dbexceptions.TimeoutError(new_args)
elif isinstance(exc, gorpc.AppError):
return handle_app_error(new_args)
elif isinstance(exc, gorpc.ProgrammingError):
return dbexceptions.ProgrammingError(new_args)
elif isinstance(exc, gorpc.GoRpcError):
return dbexceptions.FatalError(new_args)
return exc
class TabletConnection(object):
"""A simple, direct connection to the vttablet query server.
This is shard-unaware and only handles the most basic communication.
If something goes wrong, this object should be thrown away and a new
one instantiated.
"""
def __init__(
self, addr, tablet_type, keyspace, shard, timeout, user=None,
password=None, keyfile=None, certfile=None, caller_id=None):
self.transaction_id = 0
self.session_id = 0
self.addr = addr
self.caller_id = caller_id
self.certfile = certfile
self.keyfile = keyfile
self.keyspace = keyspace
self.password = password
self.shard = shard
self.tablet_type = tablet_type
self.timeout = timeout
self.user = user
self.client = self._create_client()
self.logger_object = vtdb_logger.get_logger()
def _create_client(self):
return bsonrpc.BsonRpcClient(
self.addr, self.timeout, self.user, self.password,
keyfile=self.keyfile, certfile=self.certfile)
def _get_client(self):
"""Get current client or create a new one and connect."""
# TODO(dumbunny): Merge? This is very similar to vtgatev2.
if not self.client:
self.client = self._create_client()
try:
self.client.dial()
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
return self.client
def __str__(self):
return '<TabletConnection %s %s %s/%s>' % (
self.addr, self.tablet_type, self.keyspace, self.shard)
def dial(self):
try:
if self.session_id:
self.client.close()
# This will still allow the use of the connection - a second
# redial will succeed. This is more a hint that you are doing
# it wrong and misunderstanding the life cycle of a
# TabletConnection.
# raise dbexceptions.ProgrammingError(
# 'attempting to reuse TabletConnection')
self._get_client().dial()
req = {
'Params': {
'Keyspace': self.keyspace,
'Shard': self.shard
},
'ImmediateCallerID': {'Username': self.caller_id}
}
response = self.rpc_call_and_extract_error(
'SqlQuery.GetSessionId2', req)
self.session_id = response.reply['SessionId']
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def close(self):
# rollback if possible, but ignore failures
try:
self.rollback()
except Exception:
pass
self.session_id = 0
if self.client:
self.client.close()
def is_closed(self):
return not self.client or self.client.is_closed()
def begin(self, effective_caller_id=None):
_ = effective_caller_id
if self.transaction_id:
raise dbexceptions.NotSupportedError('Nested transactions not supported')
req = {
'ImmediateCallerID': {'Username': self.caller_id},
'SessionId': self.session_id
}
try:
response = self.rpc_call_and_extract_error('SqlQuery.Begin2', req)
self.transaction_id = response.reply['TransactionId']
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def commit(self):
if not self.transaction_id:
return
req = {
'ImmediateCallerID': {'Username': self.caller_id},
'TransactionId': self.transaction_id,
'SessionId': self.session_id
}
# NOTE(msolomon) Unset the transaction_id irrespective of the RPC's
# response. The intent of commit is that no more statements can be made on
# this transaction, so we guarantee that. Transient errors between the
# db and the client shouldn't affect this part of the bookkeeping.
# Do this after fill_session, since this is a critical part.
self.transaction_id = 0
try:
response = self.rpc_call_and_extract_error('SqlQuery.Commit2', req)
return response.reply
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def rollback(self):
if not self.transaction_id:
return
req = {
'ImmediateCallerID': {'Username': self.caller_id},
'TransactionId': self.transaction_id,
'SessionId': self.session_id
}
# NOTE(msolomon) Unset the transaction_id irrespective of the RPC. If the
# RPC fails, the client will still choose a new transaction_id next time
# and the tablet server will eventually kill the abandoned transaction on
# the server side.
self.transaction_id = 0
try:
response = self.rpc_call_and_extract_error('SqlQuery.Rollback2', req)
return response.reply
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def rpc_call_and_extract_error(self, method_name, request):
"""Makes an RPC, extracts any app error that's embedded in the reply.
Args:
method_name: RPC method name, as a string, to call.
request: Request to send to the RPC method call.
Returns:
Response from RPC.
Raises:
gorpc.AppError if there is an app error embedded in the reply
"""
response = self._get_client().call(method_name, request)
reply = response.reply
if not reply or not isinstance(reply, dict):
return response
# Handle the case of new client => old server
err = reply.get('Err', None)
if err:
      if not isinstance(err, dict) or 'Message' not in err:
raise gorpc.AppError('Missing error message', method_name)
raise gorpc.AppError(reply['Err']['Message'], method_name)
return response
def _execute(self, sql, bind_variables):
req = {
'QueryRequest': {
'Sql': sql,
'BindVariables': field_types.convert_bind_vars(bind_variables),
'SessionId': self.session_id,
'TransactionId': self.transaction_id
},
'ImmediateCallerID': {'Username': self.caller_id}
}
fields = []
conversions = []
results = []
try:
response = self.rpc_call_and_extract_error('SqlQuery.Execute2', req)
reply = response.reply
for field in reply['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in reply['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = reply['RowsAffected']
lastrowid = reply['InsertId']
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables)
raise convert_exception(e, str(self), sql)
except Exception:
logging.exception('gorpc low-level error')
raise
return results, rowcount, lastrowid, fields
def _execute_batch(self, sql_list, bind_variables_list, as_transaction):
query_list = []
for sql, bind_vars in zip(sql_list, bind_variables_list):
query = {}
query['Sql'] = sql
query['BindVariables'] = field_types.convert_bind_vars(bind_vars)
query_list.append(query)
rowsets = []
try:
req = {
'QueryBatch': {
'Queries': query_list,
'SessionId': self.session_id,
'AsTransaction': as_transaction,
'TransactionId': self.transaction_id
},
'ImmediateCallerID': {'Username': self.caller_id}
}
response = self.rpc_call_and_extract_error('SqlQuery.ExecuteBatch2', req)
for reply in response.reply['List']:
fields = []
conversions = []
results = []
rowcount = 0
for field in reply['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in reply['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = reply['RowsAffected']
lastrowid = reply['InsertId']
rowsets.append((results, rowcount, lastrowid, fields))
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables_list)
raise convert_exception(e, str(self), sql_list)
except Exception:
logging.exception('gorpc low-level error')
raise
return rowsets
# we return the fields for the response, and the column conversions
# the conversions will need to be passed back to _stream_next
# (that way we avoid using a member variable here for such a corner case)
def _stream_execute(self, sql, bind_variables):
req = {
'Query': {
'Sql': sql,
'BindVariables': field_types.convert_bind_vars(bind_variables),
'SessionId': self.session_id,
'TransactionId': self.transaction_id
},
'ImmediateCallerID': {'Username': self.caller_id}
}
rpc_client = self._get_client()
stream_fields = []
stream_conversions = []
def drain_conn_after_streaming_app_error():
"""Drain connection of all incoming streaming packets (ignoring them).
This is necessary for streaming calls which return application
errors inside the RPC response (instead of through the usual GoRPC
error return). This is because GoRPC always expects the last
packet to be an error; either the usual GoRPC application error
return, or a special "end-of-stream" error.
If an application error is returned with the RPC response, there
will still be at least one more packet coming, as GoRPC has not
seen anything that it considers to be an error. If the connection
is not drained of this last packet, future reads from the wire
will be off by one and will return errors.
"""
next_result = rpc_client.stream_next()
if next_result is not None:
rpc_client.close()
raise gorpc.GoRpcError(
'Connection should only have one packet remaining'
' after streaming app error in RPC response.')
try:
rpc_client.stream_call('SqlQuery.StreamExecute2', req)
first_response = rpc_client.stream_next()
reply = first_response.reply
if reply.get('Err'):
drain_conn_after_streaming_app_error()
raise gorpc.AppError(reply['Err'].get(
'Message', 'Missing error message'))
for field in reply['Fields']:
stream_fields.append((field['Name'], field['Type']))
stream_conversions.append(
field_types.conversions.get(field['Type']))
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables)
raise convert_exception(e, str(self), sql)
except Exception:
logging.exception('gorpc low-level error')
raise
    # Hand the BsonRpcClient over to the row_generator, which will manage
    # it from here on. This TabletConnection will connect to a new client
    # if needed.
self.client = None
def row_generator():
try:
while True:
try:
stream_result = rpc_client.stream_next()
if stream_result is None:
break
if stream_result.reply.get('Err'):
drain_conn_after_streaming_app_error()
raise gorpc.AppError(stream_result.reply['Err'].get(
'Message', 'Missing error message'))
for result_item in stream_result.reply['Rows']:
yield tuple(_make_row(result_item, stream_conversions))
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
except Exception:
logging.exception('gorpc low-level error')
raise
finally:
rpc_client.close()
return row_generator(), stream_fields
def _make_row(row, conversions):
converted_row = []
for conversion_func, field_data in izip(conversions, row):
if field_data is None:
v = None
elif conversion_func:
v = conversion_func(field_data)
else:
v = field_data
converted_row.append(v)
return converted_row
def connect(*pargs, **kargs):
conn = TabletConnection(*pargs, **kargs)
conn.dial()
return conn
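# Hedged usage sketch (illustrative only; the address, keyspace, shard and
# query below are assumptions, not part of this module):
#
#   conn = connect('localhost:15002', 'replica', 'test_keyspace', '0', 30.0)
#   results, rowcount, lastrowid, fields = conn._execute(
#       'select 1 from dual', {})
#   conn.close()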
|
|
#!/usr/bin/python
import gevent
import logging
import dpkt.tftp as tftp
import gevent.socket as _socket
import tftpz.util
from itertools import count
class TftpException(Exception):
@classmethod
def from_other_exception(cls, pyexception):
return cls(tftp.EUNDEF, str(pyexception))
def __init__(self, code, message):
self.code = code
self.message = message
Exception.__init__(self, code, message)
class TftpNotFoundException(TftpException):
def __init__(self):
TftpException.__init__(self, tftp.ENOTFOUND, "Not Found")
class TftpServerListener(gevent.Greenlet):
"""
Binds to a socket and listens for TFTP requests, calling
handle() on the specified handler class. Implemented as
a gevent Greenlet.
"""
_bufsize = 1500
_max_send_tries = 4
def __init__(self, ip_address, handler, logger=None):
"""
@param ip_address: The IP address we're listening on
@param handler: The class/instance to call handle on
"""
gevent.Greenlet.__init__(self)
self.ip_address = ip_address
self.handler = handler
self.logger = logger or logging.getLogger(self.__class__.__name__)
self.iface_name, self.ip_config = tftpz.util.network_config()[ip_address]
self._keepgoing = True
self.sock = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)
self.sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_BINDTODEVICE, self.iface_name)
self.sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
self.sock.bind(('', 69))
def _run(self):
"""
Primary logic function which is the entry point for the listener.
"""
while self._keepgoing:
data, (host, port) = self.sock.recvfrom(self._bufsize)
packet = tftp.TFTP(data)
gevent.spawn(self._handle_packet, packet, (host, port))
def _handle_packet(self, packet, (host, port)):
if packet.opcode == tftp.OP_RRQ:
handler = getattr(self, "_handle_get", None)
else:
handler = None
if handler:
try:
sock = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)
sock.bind((self.ip_address, 0))
sock.settimeout(1)
handler(sock, (host, port), packet)
except TftpException as ex:
#self.logger.debug("TftpException: %r" % ex)
self._send_error(sock, ex, (host, port))
except Exception as ex:
self.logger.error("Uncaught Exception: %r" % ex)
ex2 = TftpException.from_other_exception(ex)
self._send_error(sock, ex2, (host, port))
raise
def stop(self):
self._keepgoing = False
self.kill()
def _handle_get(self, sock, (host, port), packet):
"""
Wrapper method around the true handle_get.
@param sock: The socket for the host we're handling the request for
@param host: The host of the requestor
@param port: The port of the requestor
@param packet: The TFTP packet
"""
handler = getattr(self.handler, "handle_get", None)
if handler:
fileobj = handler(packet.filename)
self._send_file(sock, (host, port), fileobj)
def _send_data(self, sock, (host, port), data, block):
"""
Helper function, called by _send_file which sends a block of data.
@param sock: The socket we're sending to
@param host: The host we're sending to
@param port: The port we're sending to
@param data: The data we're sending
@param block: The block we're on (an int)
"""
pkt = tftp.TFTP()
pkt.opcode = tftp.OP_DATA
pkt.block = block
pkt.data = data
for _ in xrange(self._max_send_tries):
sock.sendto(str(pkt), 0, (host, port))
try:
self._wait_for_ack(sock, block)
return
except _socket.timeout:
continue
raise TftpException(tftp.EUNDEF, "timed out")
def _wait_for_ack(self, sock, blocknum):
"""
The TFTP RFC says we need to wait for the client to ACK...
@param sock: The sock we're waiting for the ACK on
@param blocknum: The block number we're waiting for an ACK for
"""
while True:
recv_str, recv_addr = sock.recvfrom(1024)
ack = tftp.TFTP(recv_str)
if ack.opcode == tftp.OP_ACK and ack.block[0] == blocknum:
return
def _send_file(self, sock, (host, port), fileobj):
"""
Send an entire file to a client, given a file-like object.
@param sock: The socket we're using
@param host: The host of the client
@param port: The port of the client
@param fileobj: The file to be sent
"""
for block in count(1):
data = fileobj.read(512)
self._send_data(sock, (host, port), data, block)
if len(data) < 512:
break
def _send_error(self, sock, e, addr):
"""
Send an error message back to the client.
@param sock: The socket to use
        @param e: The exception
        @param addr: The (host, port) tuple of the client
        """
#self.logger.debug("client %s:%d got error %d %r" % (addr + (e.code, e.message)))
pkt = tftp.TFTP()
pkt.opcode = tftp.OP_ERR
pkt.errcode = e.code
pkt.errmsg = e.message
sock.sendto(str(pkt), 0, addr)
class TftpServer(object):
_listener_factory = TftpServerListener
def __init__(self, handler, logger=None):
self.handler = handler
self.logger = logger or logging.getLogger(self.__class__.__name__)
self._listeners = []
self._running = False
def listen(self, ip_address):
listener = self._listener_factory(ip_address, self.handler, self.logger)
self._listeners.append(listener)
if self._running:
self._launch_listener(listener)
def run(self):
self._running = True
map(self._launch_listener, self._listeners)
for listener in self._listeners:
listener.join()
def stop(self):
self._running = False
for listener in self._listeners:
listener.stop()
def _launch_listener(self, listener):
self.logger.info("listening on %s" % listener.ip_address)
listener.start()
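# Hedged usage sketch (not part of the original module). It assumes the
# address passed to listen() is configured on a local interface known to
# tftpz.util.network_config() and that the process may bind UDP port 69
# (usually requires root). The handler interface is inferred from
# _handle_get(): handle_get(filename) must return a file-like object.
if __name__ == '__main__':
    import StringIO
    class InMemoryHandler(object):
        """Serve every requested filename from an in-memory string."""
        def handle_get(self, filename):
            return StringIO.StringIO("hello from tftpz\n")
    logging.basicConfig(level=logging.INFO)
    server = TftpServer(InMemoryHandler())
    server.listen('192.168.0.10')  # illustrative address only
    server.run()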
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
import tempfile
from os import path
from docker.errors import APIError
from six import StringIO
from six import text_type
from .. import mock
from .testcases import DockerClientTestCase
from .testcases import pull_busybox
from compose import __version__
from compose.const import LABEL_CONTAINER_NUMBER
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION
from compose.container import Container
from compose.service import build_extra_hosts
from compose.service import ConfigError
from compose.service import ConvergencePlan
from compose.service import Net
from compose.service import Service
def create_and_start_container(service, **override_options):
container = service.create_container(**override_options)
return service.start_container(container)
class ServiceTest(DockerClientTestCase):
def test_containers(self):
foo = self.create_service('foo')
bar = self.create_service('bar')
create_and_start_container(foo)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(foo.containers()[0].name, 'composetest_foo_1')
self.assertEqual(len(bar.containers()), 0)
create_and_start_container(bar)
create_and_start_container(bar)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(len(bar.containers()), 2)
names = [c.name for c in bar.containers()]
self.assertIn('composetest_bar_1', names)
self.assertIn('composetest_bar_2', names)
def test_containers_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(db.containers(stopped=True), [])
self.assertEqual(db.containers(one_off=True, stopped=True), [container])
def test_project_is_added_to_container_name(self):
service = self.create_service('web')
create_and_start_container(service)
self.assertEqual(service.containers()[0].name, 'composetest_web_1')
def test_start_stop(self):
service = self.create_service('scalingtest')
self.assertEqual(len(service.containers(stopped=True)), 0)
service.create_container()
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.start()
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.stop(timeout=1)
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.stop(timeout=1)
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
def test_kill_remove(self):
service = self.create_service('scalingtest')
create_and_start_container(service)
self.assertEqual(len(service.containers()), 1)
service.remove_stopped()
self.assertEqual(len(service.containers()), 1)
service.kill()
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.remove_stopped()
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_create_container_with_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_one_off_when_existing_container_is_running(self):
db = self.create_service('db')
db.start()
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=['/var/db'])
container = service.create_container()
service.start_container(container)
self.assertIn('/var/db', container.get('Volumes'))
def test_create_container_with_volume_driver(self):
service = self.create_service('db', volume_driver='foodriver')
container = service.create_container()
service.start_container(container)
self.assertEqual('foodriver', container.get('Config.VolumeDriver'))
def test_create_container_with_cpu_shares(self):
service = self.create_service('db', cpu_shares=73)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpuShares'), 73)
def test_build_extra_hosts(self):
# string
self.assertRaises(ConfigError, lambda: build_extra_hosts("www.example.com: 192.168.0.17"))
# list of strings
self.assertEqual(build_extra_hosts(
["www.example.com:192.168.0.17"]),
{'www.example.com': '192.168.0.17'})
self.assertEqual(build_extra_hosts(
["www.example.com: 192.168.0.17"]),
{'www.example.com': '192.168.0.17'})
self.assertEqual(build_extra_hosts(
["www.example.com: 192.168.0.17",
"static.example.com:192.168.0.19",
"api.example.com: 192.168.0.18"]),
{'www.example.com': '192.168.0.17',
'static.example.com': '192.168.0.19',
'api.example.com': '192.168.0.18'})
# list of dictionaries
self.assertRaises(ConfigError, lambda: build_extra_hosts(
[{'www.example.com': '192.168.0.17'},
{'api.example.com': '192.168.0.18'}]))
# dictionaries
self.assertEqual(build_extra_hosts(
{'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'}),
{'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'})
def test_create_container_with_extra_hosts_list(self):
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
def test_create_container_with_extra_hosts_dicts(self):
extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
def test_create_container_with_cpu_set(self):
service = self.create_service('db', cpuset='0')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
def test_create_container_with_read_only_root_fs(self):
read_only = True
service = self.create_service('db', read_only=read_only)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.ReadonlyRootfs'), read_only, container.get('HostConfig'))
def test_create_container_with_security_opt(self):
security_opt = ['label:disable']
service = self.create_service('db', security_opt=security_opt)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
def test_create_container_with_mac_address(self):
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
def test_create_container_with_specified_volume(self):
host_path = '/tmp/host-path'
container_path = '/container-path'
service = self.create_service('db', volumes=['%s:%s' % (host_path, container_path)])
container = service.create_container()
service.start_container(container)
volumes = container.inspect()['Volumes']
self.assertIn(container_path, volumes)
# Match the last component ("host-path"), because boot2docker symlinks /tmp
actual_host_path = volumes[container_path]
self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
def test_recreate_preserves_volume_with_trailing_slash(self):
"""
When the Compose file specifies a trailing slash in the container path, make
sure we copy the volume over when recreating.
"""
service = self.create_service('data', volumes=['/data/'])
old_container = create_and_start_container(service)
volume_path = old_container.get('Volumes')['/data']
new_container = service.recreate_container(old_container)
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
def test_duplicate_volume_trailing_slash(self):
"""
When an image specifies a volume, and the Compose file specifies a host path
but adds a trailing slash, make sure that we don't create duplicate binds.
"""
host_path = '/tmp/data'
container_path = '/data'
volumes = ['{}:{}/'.format(host_path, container_path)]
tmp_container = self.client.create_container(
'busybox', 'true',
volumes={container_path: {}},
labels={'com.docker.compose.test_image': 'true'},
)
image = self.client.commit(tmp_container)['Id']
service = self.create_service('db', image=image, volumes=volumes)
old_container = create_and_start_container(service)
self.assertEqual(
old_container.get('Config.Volumes'),
{container_path: {}},
)
service = self.create_service('db', image=image, volumes=volumes)
new_container = service.recreate_container(old_container)
self.assertEqual(
new_container.get('Config.Volumes'),
{container_path: {}},
)
self.assertEqual(service.containers(stopped=False), [new_container])
def test_create_container_with_volumes_from(self):
volume_service = self.create_service('data')
volume_container_1 = volume_service.create_container()
volume_container_2 = Container.create(
self.client,
image='busybox:latest',
command=["top"],
labels={LABEL_PROJECT: 'composetest'},
)
host_service = self.create_service('host', volumes_from=[volume_service, volume_container_2])
host_container = host_service.create_container()
host_service.start_container(host_container)
self.assertIn(volume_container_1.id,
host_container.get('HostConfig.VolumesFrom'))
self.assertIn(volume_container_2.id,
host_container.get('HostConfig.VolumesFrom'))
def test_execute_convergence_plan_recreate(self):
service = self.create_service(
'db',
environment={'FOO': '1'},
volumes=['/etc'],
entrypoint=['top'],
command=['-d', '1']
)
old_container = service.create_container()
self.assertEqual(old_container.get('Config.Entrypoint'), ['top'])
self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
self.assertIn('FOO=1', old_container.get('Config.Env'))
self.assertEqual(old_container.name, 'composetest_db_1')
service.start_container(old_container)
old_container.inspect() # reload volume data
volume_path = old_container.get('Volumes')['/etc']
num_containers_before = len(self.client.containers(all=True))
service.options['environment']['FOO'] = '2'
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
self.assertEqual(new_container.get('Config.Entrypoint'), ['top'])
self.assertEqual(new_container.get('Config.Cmd'), ['-d', '1'])
self.assertIn('FOO=2', new_container.get('Config.Env'))
self.assertEqual(new_container.name, 'composetest_db_1')
self.assertEqual(new_container.get('Volumes')['/etc'], volume_path)
self.assertIn(
'affinity:container==%s' % old_container.id,
new_container.get('Config.Env'))
self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
self.assertNotEqual(old_container.id, new_container.id)
self.assertRaises(APIError,
self.client.inspect_container,
old_container.id)
def test_execute_convergence_plan_when_containers_are_stopped(self):
service = self.create_service(
'db',
environment={'FOO': '1'},
volumes=['/var/db'],
entrypoint=['top'],
command=['-d', '1']
)
service.create_container()
containers = service.containers(stopped=True)
self.assertEqual(len(containers), 1)
container, = containers
self.assertFalse(container.is_running)
service.execute_convergence_plan(ConvergencePlan('start', [container]))
containers = service.containers()
self.assertEqual(len(containers), 1)
container.inspect()
self.assertEqual(container, containers[0])
self.assertTrue(container.is_running)
def test_execute_convergence_plan_with_image_declared_volume(self):
service = Service(
project='composetest',
name='db',
client=self.client,
build='tests/fixtures/dockerfile-with-volume',
)
old_container = create_and_start_container(service)
self.assertEqual(list(old_container.get('Volumes').keys()), ['/data'])
volume_path = old_container.get('Volumes')['/data']
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
self.assertEqual(list(new_container.get('Volumes')), ['/data'])
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
def test_start_container_passes_through_options(self):
db = self.create_service('db')
create_and_start_container(db, environment={'FOO': 'BAR'})
self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
def test_start_container_inherits_options_from_constructor(self):
db = self.create_service('db', environment={'FOO': 'BAR'})
create_and_start_container(db)
self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
def test_start_container_creates_links(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, None)])
create_and_start_container(db)
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(web.containers()[0].links()),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'db'])
)
def test_start_container_creates_links_with_names(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, 'custom_link_name')])
create_and_start_container(db)
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(web.containers()[0].links()),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'custom_link_name'])
)
def test_start_container_with_external_links(self):
db = self.create_service('db')
web = self.create_service('web', external_links=['composetest_db_1',
'composetest_db_2',
'composetest_db_3:db_3'])
for _ in range(3):
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(web.containers()[0].links()),
set([
'composetest_db_1',
'composetest_db_2',
'db_3']),
)
def test_start_normal_container_does_not_create_links_to_its_own_service(self):
db = self.create_service('db')
create_and_start_container(db)
create_and_start_container(db)
c = create_and_start_container(db)
self.assertEqual(set(c.links()), set([]))
def test_start_one_off_container_creates_links_to_its_own_service(self):
db = self.create_service('db')
create_and_start_container(db)
create_and_start_container(db)
c = create_and_start_container(db, one_off=True)
self.assertEqual(
set(c.links()),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'db'])
)
def test_start_container_builds_images(self):
service = Service(
name='test',
client=self.client,
build='tests/fixtures/simple-dockerfile',
project='composetest',
)
container = create_and_start_container(service)
container.wait()
self.assertIn(b'success', container.logs())
self.assertEqual(len(self.client.images(name='composetest_test')), 1)
def test_start_container_uses_tagged_image_if_it_exists(self):
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
service = Service(
name='test',
client=self.client,
build='this/does/not/exist/and/will/throw/error',
project='composetest',
)
container = create_and_start_container(service)
container.wait()
self.assertIn(b'success', container.logs())
def test_start_container_creates_ports(self):
service = self.create_service('web', ports=[8000])
container = create_and_start_container(service).inspect()
self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
def test_build(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
self.create_service('web', build=base_dir).build()
self.assertEqual(len(self.client.images(name='composetest_web')), 1)
def test_build_non_ascii_filename(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f:
f.write("hello world\n")
self.create_service('web', build=text_type(base_dir)).build()
self.assertEqual(len(self.client.images(name='composetest_web')), 1)
    def test_start_container_stays_unprivileged(self):
service = self.create_service('web')
container = create_and_start_container(service).inspect()
self.assertEqual(container['HostConfig']['Privileged'], False)
    def test_start_container_becomes_privileged(self):
service = self.create_service('web', privileged=True)
container = create_and_start_container(service).inspect()
self.assertEqual(container['HostConfig']['Privileged'], True)
def test_expose_does_not_publish_ports(self):
service = self.create_service('web', expose=["8000"])
container = create_and_start_container(service).inspect()
self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None})
def test_start_container_creates_port_with_explicit_protocol(self):
service = self.create_service('web', ports=['8000/udp'])
container = create_and_start_container(service).inspect()
self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/udp'])
def test_start_container_creates_fixed_external_ports(self):
service = self.create_service('web', ports=['8000:8000'])
container = create_and_start_container(service).inspect()
self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
service = self.create_service('web', ports=['8001:8000'])
container = create_and_start_container(service).inspect()
self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8001')
def test_port_with_explicit_interface(self):
service = self.create_service('web', ports=[
'127.0.0.1:8001:8000',
'0.0.0.0:9001:9000/udp',
])
container = create_and_start_container(service).inspect()
self.assertEqual(container['NetworkSettings']['Ports'], {
'8000/tcp': [
{
'HostIp': '127.0.0.1',
'HostPort': '8001',
},
],
'9000/udp': [
{
'HostIp': '0.0.0.0',
'HostPort': '9001',
},
],
})
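# Added commentary: the port strings above use docker-compose's
# [HOST_IP:]HOST_PORT:CONTAINER_PORT[/PROTOCOL] form.  As the assertions show,
# '127.0.0.1:8001:8000' binds container port 8000/tcp to 127.0.0.1:8001 on the
# host, while '0.0.0.0:9001:9000/udp' binds 9000/udp to 0.0.0.0:9001.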
def test_create_with_image_id(self):
# Get image id for the current busybox:latest
pull_busybox(self.client)
image_id = self.client.inspect_image('busybox:latest')['Id'][:12]
service = self.create_service('foo', image=image_id)
service.create_container()
def test_scale(self):
service = self.create_service('web')
service.scale(1)
self.assertEqual(len(service.containers()), 1)
# Ensure containers don't have stdout, stderr or stdin attached
container = service.containers()[0]
config = container.inspect()['Config']
self.assertFalse(config['AttachStderr'])
self.assertFalse(config['AttachStdout'])
self.assertFalse(config['AttachStdin'])
service.scale(3)
self.assertEqual(len(service.containers()), 3)
service.scale(1)
self.assertEqual(len(service.containers()), 1)
service.scale(0)
self.assertEqual(len(service.containers()), 0)
def test_scale_with_stopped_containers(self):
"""
Given there are some stopped containers and scale is called with a
desired number that is the same as the number of stopped containers,
test that those containers are restarted and not removed/recreated.
"""
service = self.create_service('web')
next_number = service._next_container_number()
valid_numbers = [next_number, next_number + 1]
service.create_container(number=next_number)
service.create_container(number=next_number + 1)
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
service.scale(2)
for container in service.containers():
self.assertTrue(container.is_running)
self.assertTrue(container.number in valid_numbers)
captured_output = mock_stdout.getvalue()
self.assertNotIn('Creating', captured_output)
self.assertIn('Starting', captured_output)
@mock.patch('sys.stdout', new_callable=StringIO)
def test_scale_with_stopped_containers_and_needing_creation(self, mock_stdout):
"""
Given there are some stopped containers and scale is called with a
desired number that is greater than the number of stopped containers,
test that those containers are restarted and required number are created.
"""
service = self.create_service('web')
next_number = service._next_container_number()
service.create_container(number=next_number, quiet=True)
for container in service.containers():
self.assertFalse(container.is_running)
service.scale(2)
self.assertEqual(len(service.containers()), 2)
for container in service.containers():
self.assertTrue(container.is_running)
captured_output = mock_stdout.getvalue()
self.assertIn('Creating', captured_output)
self.assertIn('Starting', captured_output)
@mock.patch('sys.stdout', new_callable=StringIO)
def test_scale_with_api_returns_errors(self, mock_stdout):
"""
Test that when scaling, if the API returns an error, that error is handled
and the remaining threads continue.
"""
service = self.create_service('web')
next_number = service._next_container_number()
service.create_container(number=next_number, quiet=True)
with mock.patch(
'compose.container.Container.create',
side_effect=APIError(message="testing", response={}, explanation="Boom")):
service.scale(3)
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.assertIn("ERROR: for 2 Boom", mock_stdout.getvalue())
def test_scale_with_api_returns_unexpected_exception(self):
"""
Test that when scaling, if the API returns an error that is not of type
APIError, that error is re-raised.
"""
service = self.create_service('web')
next_number = service._next_container_number()
service.create_container(number=next_number, quiet=True)
with mock.patch(
'compose.container.Container.create',
side_effect=ValueError("BOOM")
):
with self.assertRaises(ValueError):
service.scale(3)
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
@mock.patch('compose.service.log')
def test_scale_with_desired_number_already_achieved(self, mock_log):
"""
Test that calling scale with a desired number that is equal to the
number of containers already running results in no change.
"""
service = self.create_service('web')
next_number = service._next_container_number()
container = service.create_container(number=next_number, quiet=True)
container.start()
self.assertTrue(container.is_running)
self.assertEqual(len(service.containers()), 1)
service.scale(1)
self.assertEqual(len(service.containers()), 1)
container.inspect()
self.assertTrue(container.is_running)
captured_output = mock_log.info.call_args[0]
self.assertIn('Desired container number already achieved', captured_output)
@mock.patch('compose.service.log')
def test_scale_with_custom_container_name_outputs_warning(self, mock_log):
"""
Test that calling scale on a service that has a custom container name
results in warning output.
"""
service = self.create_service('app', container_name='custom-container')
self.assertEqual(service.custom_container_name(), 'custom-container')
service.scale(3)
captured_output = mock_log.warn.call_args[0][0]
self.assertEqual(len(service.containers()), 1)
self.assertIn(
"Remove the custom name to scale the service.",
captured_output
)
def test_scale_sets_ports(self):
service = self.create_service('web', ports=['8000'])
service.scale(2)
containers = service.containers()
self.assertEqual(len(containers), 2)
for container in containers:
self.assertEqual(list(container.inspect()['HostConfig']['PortBindings'].keys()), ['8000/tcp'])
def test_network_mode_none(self):
service = self.create_service('web', net=Net('none'))
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.NetworkMode'), 'none')
def test_network_mode_bridged(self):
service = self.create_service('web', net=Net('bridge'))
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.NetworkMode'), 'bridge')
def test_network_mode_host(self):
service = self.create_service('web', net=Net('host'))
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')
def test_pid_mode_none_defined(self):
service = self.create_service('web', pid=None)
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.PidMode'), '')
def test_pid_mode_host(self):
service = self.create_service('web', pid='host')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.PidMode'), 'host')
def test_dns_no_value(self):
service = self.create_service('web')
container = create_and_start_container(service)
self.assertIsNone(container.get('HostConfig.Dns'))
def test_dns_single_value(self):
service = self.create_service('web', dns='8.8.8.8')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8'])
def test_dns_list(self):
service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])
def test_restart_always_value(self):
service = self.create_service('web', restart='always')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')
def test_restart_on_failure_value(self):
service = self.create_service('web', restart='on-failure:5')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure')
self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5)
def test_cap_add_list(self):
service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.CapAdd'), ['SYS_ADMIN', 'NET_ADMIN'])
def test_cap_drop_list(self):
service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN'])
def test_dns_search_no_value(self):
service = self.create_service('web')
container = create_and_start_container(service)
self.assertIsNone(container.get('HostConfig.DnsSearch'))
def test_dns_search_single_value(self):
service = self.create_service('web', dns_search='example.com')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.DnsSearch'), ['example.com'])
def test_dns_search_list(self):
service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com'])
def test_working_dir_param(self):
service = self.create_service('container', working_dir='/working/dir/sample')
container = service.create_container()
self.assertEqual(container.get('Config.WorkingDir'), '/working/dir/sample')
def test_split_env(self):
service = self.create_service('web', environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
env = create_and_start_container(service).environment
for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
self.assertEqual(env[k], v)
def test_env_from_file_combined_with_env(self):
service = self.create_service(
'web',
environment=['ONE=1', 'TWO=2', 'THREE=3'],
env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
env = create_and_start_container(service).environment
for k, v in {'ONE': '1', 'TWO': '2', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}.items():
self.assertEqual(env[k], v)
@mock.patch.dict(os.environ)
def test_resolve_env(self):
os.environ['FILE_DEF'] = 'E1'
os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3'
service = self.create_service('web', environment={'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': None, 'NO_DEF': None})
env = create_and_start_container(service).environment
for k, v in {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}.items():
self.assertEqual(env[k], v)
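# Added commentary: the expectations above summarise the resolution rules this
# test exercises: an explicit value in `environment` wins ('FILE_DEF',
# 'FILE_DEF_EMPTY'), a value of None is looked up in os.environ
# ('ENV_DEF' -> 'E3'), and a None value with no matching variable in
# os.environ resolves to an empty string ('NO_DEF' -> '').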
def test_with_high_enough_api_version_we_get_default_network_mode(self):
# TODO: remove this test once minimum docker version is 1.8.x
with mock.patch.object(self.client, '_version', '1.20'):
service = self.create_service('web')
service_config = service._get_container_host_config({})
self.assertEqual(service_config['NetworkMode'], 'default')
def test_labels(self):
labels_dict = {
'com.example.description': "Accounting webapp",
'com.example.department': "Finance",
'com.example.label-with-empty-value': "",
}
compose_labels = {
LABEL_CONTAINER_NUMBER: '1',
LABEL_ONE_OFF: 'False',
LABEL_PROJECT: 'composetest',
LABEL_SERVICE: 'web',
LABEL_VERSION: __version__,
}
expected = dict(labels_dict, **compose_labels)
service = self.create_service('web', labels=labels_dict)
labels = create_and_start_container(service).labels.items()
for pair in expected.items():
self.assertIn(pair, labels)
service.kill()
service.remove_stopped()
labels_list = ["%s=%s" % pair for pair in labels_dict.items()]
service = self.create_service('web', labels=labels_list)
labels = create_and_start_container(service).labels.items()
for pair in expected.items():
self.assertIn(pair, labels)
def test_empty_labels(self):
labels_list = ['foo', 'bar']
service = self.create_service('web', labels=labels_list)
labels = create_and_start_container(service).labels.items()
for name in labels_list:
self.assertIn((name, ''), labels)
def test_custom_container_name(self):
service = self.create_service('web', container_name='my-web-container')
self.assertEqual(service.custom_container_name(), 'my-web-container')
container = create_and_start_container(service)
self.assertEqual(container.name, 'my-web-container')
one_off_container = service.create_container(one_off=True)
self.assertNotEqual(one_off_container.name, 'my-web-container')
def test_log_driver_invalid(self):
service = self.create_service('web', log_driver='xxx')
expected_error_msg = "logger: no log driver named 'xxx' is registered"
with self.assertRaisesRegexp(APIError, expected_error_msg):
create_and_start_container(service)
def test_log_driver_empty_default_jsonfile(self):
service = self.create_service('web')
log_config = create_and_start_container(service).log_config
self.assertEqual('json-file', log_config['Type'])
self.assertFalse(log_config['Config'])
def test_log_driver_none(self):
service = self.create_service('web', log_driver='none')
log_config = create_and_start_container(service).log_config
self.assertEqual('none', log_config['Type'])
self.assertFalse(log_config['Config'])
def test_devices(self):
service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
device_config = create_and_start_container(service).get('HostConfig.Devices')
device_dict = {
'PathOnHost': '/dev/random',
'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/mapped-random'
}
self.assertEqual(1, len(device_config))
self.assertDictEqual(device_dict, device_config[0])
def test_duplicate_containers(self):
service = self.create_service('web')
options = service._get_container_create_options({}, 1)
original = Container.create(service.client, **options)
self.assertEqual(set(service.containers(stopped=True)), set([original]))
self.assertEqual(set(service.duplicate_containers()), set())
options['name'] = 'temporary_container_name'
duplicate = Container.create(service.client, **options)
self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
|
|
from vilya.models.project import CodeDoubanProject
from vilya.models.sphinx_docs import SphinxDocs
import nose
from tests.base import TestCase
from tests.utils import delete_project
base_yaml_conf_old = """
sphinx_docs:
dir: ""
"""
base_yaml_conf = """
docs:
docs:
dir: ""
builder: pickle
"""
base_index_rst = """
Unit testing sphinx docs
========================
.. toctree::
:glob:
*
"""
base_document1_rst = """
Test doc1
=========
Something here
"""
base_document2_rst = """
Test doc2
=========
Something here
"""
class TestDocsHelpers(TestCase):
html1 = '<h1>TITLE1**</h1>'
def _prj(self):
delete_project('test')
prj = CodeDoubanProject.add('test', 'owner', create_trac=False)
return prj
def _add(self, prj, fn, content):
u = self.addUser()
prj.git.commit_one_file(fn, content, 'add %s' % fn, u)
class TestDocs(TestDocsHelpers):
@nose.tools.raises(Exception)
def test_create_wrong(self):
sd = SphinxDocs('unexisting_project')
assert sd.enabled is False
def test_create_disabled(self):
prj = self._prj()
conf = """
sphinx_docs: ""
docs:
docs:
builder: pickle
"""
self._add(prj, 'code_config.yaml', conf)
sd = SphinxDocs(prj.name)
assert sd.enabled is True, "should be enabled by default"
def test_create_enabled(self):
prj = self._prj()
self._add(prj, 'code_config.yaml', base_yaml_conf)
sd = SphinxDocs(prj.name)
assert sd.enabled is True
def test_create_with_index_and_doc(self):
prj = self._prj()
self._add(prj, 'code_config.yaml', base_yaml_conf)
self._add(prj, 'index.rst', base_index_rst)
self._add(prj, 'doc1.rst', base_document1_rst)
sd = SphinxDocs(prj.name)
sd.build_all()
builder = sd.get_builder('docs')
doc = builder.template_data('', {})
assert doc['title'] == 'Unit testing sphinx docs'
def test_build_info(self):
prj = self._prj()
self._add(prj, 'code_config.yaml', base_yaml_conf)
self._add(prj, 'index.rst', base_index_rst)
self._add(prj, 'doc1.rst', base_document1_rst)
sd = SphinxDocs(prj.name)
sd.build_all()
bi = sd.last_build_info()
assert bi['status'] == 'success'
def test_need_rebuild(self):
prj = self._prj()
self._add(prj, 'code_config.yaml', base_yaml_conf)
self._add(prj, 'index.rst', base_index_rst)
sd = SphinxDocs(prj.name)
assert sd.need_rebuild()
sd.build_all()
assert not sd.need_rebuild()
self._add(prj, 'doc1.rst', base_document1_rst)
sd = SphinxDocs(prj.name) # Bad, should not have to refresh object
assert sd.need_rebuild()
sd.build_all()
assert not sd.need_rebuild()
def test_create_with_index_and_doc_and_get_again(self):
prj = self._prj()
self._add(prj, 'code_config.yaml', base_yaml_conf)
self._add(prj, 'index.rst', base_index_rst)
self._add(prj, 'doc1.rst', base_document1_rst)
sd = SphinxDocs(prj.name)
sd.build_all()
sd2 = SphinxDocs(prj.name)
builder = sd2.get_builder('docs')
assert builder.template
doc = builder.template_data('', {})
assert doc['title'] == 'Unit testing sphinx docs'
def test_create_with_index_and_doc_and_two_builders(self):
prj = self._prj()
base_yaml_conf_two_builders = """
docs:
docs:
builder: html
dir: ""
html_theme: default
html_short_title: testsub
docs2:
dir: ""
builder: pickle
"""
self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
self._add(prj, 'index.rst', base_index_rst)
self._add(prj, 'doc1.rst', base_document1_rst)
sd = SphinxDocs(prj.name)
sd.build_all()
assert sd.builders == ['docs', 'docs2']
pickle_builder = sd.get_builder('docs2')
assert pickle_builder.template
doc = pickle_builder.template_data('', {})
assert doc['title'] == 'Unit testing sphinx docs'
html_builder = sd.get_builder('docs')
assert not html_builder.template
raw = html_builder.raw_content('index.html', {})
assert "<h1>Unit testing sphinx docs" in raw
class TestDocsPages(TestDocsHelpers):
conf = 'docs: {"pages": {"builder": "raw"}}'
builder = 'raw'
def test_pages_mode(self):
prj = self._prj()
self._add(prj, 'code_config.yaml', self.conf)
self._add(prj, 'pages/index.html', self.html1)
sd = SphinxDocs(prj.name)
assert sd.builders == ['pages']
assert sd.last_build_info() is None
sd.build_all()
assert sd.last_build_info()['status'] == 'success'
builder = sd.get_builder(sd.builders[0])
assert builder.raw_content('index.html', {}) == self.html1
def test_pages_no_docsdir(self):
prj = self._prj()
self._add(prj, 'code_config.yaml', self.conf)
self._add(prj, 'pagesNOT_THE_SAME/index.html', self.html1)
sd = SphinxDocs(prj.name)
sd.build_all()
assert sd.last_build_info()['status'] == 'no_doc_dir_found'
builder = sd.get_builder(sd.builders[0])
assert builder.raw_content('index.html', {}) is False
def test_html_and_raw_builders(self):
prj = self._prj()
base_yaml_conf_two_builders = """
docs:
docs:
builder: html
html_short_title: testsub
dir: docs
html_theme: default
pages:
builder: raw
dir: docs
"""
self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
self._add(prj, 'docs/index.rst', base_index_rst)
self._add(prj, 'docs/index.html', self.html1)
self._add(prj, 'docs/doc1.rst', base_document1_rst)
sd = SphinxDocs(prj.name)
sd.build_all()
assert sd.builders == ['docs', 'pages']
raw_builder = sd.get_builder('pages')
doc = raw_builder.raw_content('index.html', {})
assert doc == self.html1
html_builder = sd.get_builder('docs')
assert not html_builder.template
raw = html_builder.raw_content('index.html', {})
assert "<h1>Unit testing sphinx docs" in raw
def test_html_and_raw_builders_in_different_dirs(self):
prj = self._prj()
base_yaml_conf_two_builders = """
docs:
docs:
builder: html
html_short_title: testsub
dir: html_docs
html_theme: default
pages:
builder: raw
dir: pages
"""
self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
self._add(prj, 'html_docs/index.rst', base_index_rst)
self._add(prj, 'html_docs/doc1.rst', base_document1_rst)
self._add(prj, 'pages/index.html', self.html1)
sd = SphinxDocs(prj.name)
sd.build_all()
assert sd.builders == ['docs', 'pages']
raw_builder = sd.get_builder('pages')
doc = raw_builder.raw_content('index.html', {})
assert doc == self.html1
html_builder = sd.get_builder('docs')
assert not html_builder.template
raw = html_builder.raw_content('index.html', {})
assert "<h1>Unit testing sphinx docs" in raw
class TestDocsPagesNewConf(TestDocsHelpers):
def test_two_builders_with_other_config_fmt(self):
prj = self._prj()
base_yaml_conf_two_builders = """
docs:
docs:
builder: html
html_theme: default
html_short_title: testsub
dir: html_docs
pages:
builder: raw
"""
self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
self._add(prj, 'html_docs/index.rst', base_index_rst)
self._add(prj, 'html_docs/doc1.rst', base_document1_rst)
self._add(prj, 'pages/index.html', self.html1)
sd = SphinxDocs(prj.name)
sd.build_all()
assert sd.builders == ['docs', 'pages'] # noqa Sorted alphabetically by default
raw_builder = sd.get_builder('pages')
doc = raw_builder.raw_content('index.html', {})
assert doc == self.html1
html_builder = sd.get_builder('docs')
assert not html_builder.template
raw = html_builder.raw_content('index.html', {})
assert "<h1>Unit testing sphinx docs" in raw
def test_sort_key(self):
prj = self._prj()
base_yaml_conf_two_builders = """
sphinx_docs:
docs:
docs:
builder: html
html_theme: default
html_short_title: testsub
sort: 2
pages:
builder: raw
sort: 1
"""
self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
sd = SphinxDocs(prj.name)
assert sd.builders == ['pages', 'docs']
|
|
from __future__ import absolute_import
from pyswagger.spec import base
import unittest
import six
import copy
class GrandChildObj(six.with_metaclass(base.FieldMeta, base.BaseObj)):
__swagger_fields__ = {
'name': ''
}
class GrandChildContext(base.Context):
__swagger_ref_object__ = GrandChildObj
class ChildObj(six.with_metaclass(base.FieldMeta, base.BaseObj)):
__swagger_fields__ = {
'g': None
}
class ChildContext(base.Context):
__swagger_ref_object__ = ChildObj
__swagger_child__ = {
'g': (None, GrandChildContext)
}
class TObj(six.with_metaclass(base.FieldMeta, base.BaseObj)):
__swagger_fields__ = {
'a': [],
'b': {},
'c': {},
'd': None,
'f': None
}
class TContext(base.Context):
__swagger_ref_object__ = TObj
__swagger_child__ = {
'a': (base.ContainerType.list_, ChildContext),
'b': (base.ContainerType.dict_, ChildContext),
'c': (base.ContainerType.dict_of_list_, ChildContext),
'd': (None, ChildContext),
}
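# Added commentary: __swagger_child__ tells the context how to build each
# field of TObj; a rough sketch of what parsing produces, based on the tests
# below:
#   'a': ContainerType.list_         -> a list of ChildObj
#   'b': ContainerType.dict_         -> a dict mapping name -> ChildObj
#   'c': ContainerType.dict_of_list_ -> a dict mapping name -> [ChildObj, ...]
#   'd': None                        -> a single ChildObj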
class SwaggerBaseTestCase(unittest.TestCase):
""" test things in base.py """
def test_baseobj_children(self):
""" test _children_ """
tmp = {'t': {}}
obj = {'a': [{}, {}, {}], 'b': {'/a': {}, '~b': {}, 'cc': {}}}
with TContext(tmp, 't') as ctx:
ctx.parse(obj)
c = tmp['t']._children_.keys()
self.assertEqual(sorted(c), sorted(['a/0', 'a/1', 'a/2', 'b/cc', 'b/~0b', 'b/~1a']))
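# Added commentary: the expected keys are JSON-Pointer style paths, so the
# dict keys '/a' and '~b' show up escaped as 'b/~1a' and 'b/~0b'
# ('~' -> '~0', '/' -> '~1'), while list children are addressed by index
# ('a/0', 'a/1', 'a/2').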
def test_baseobj_parent(self):
""" test _parent_ """
tmp = {'t': {}}
obj = {'a': [{}], 'b': {'bb': {}}, 'c': {'cc': [{}]}, 'd': {}}
with TContext(tmp, 't') as ctx:
ctx.parse(obj)
def _check(o):
self.assertTrue(isinstance(o, ChildObj))
self.assertEqual(id(tmp['t']), id(o._parent_))
_check(tmp['t'].a[0])
_check(tmp['t'].b['bb'])
_check(tmp['t'].c['cc'][0])
_check(tmp['t'].d)
def test_field_rename(self):
""" renamed field name """
class TestRenameObj(six.with_metaclass(base.FieldMeta, base.BaseObj)):
__swagger_fields__ = {'a': None}
__swagger_rename__ = {'a': 'b'}
class TestRenameContext(base.Context):
__swagger_ref_object__ = TestRenameObj
tmp = {'t': {}}
obj = {'a': 1}
with TestRenameContext(tmp, 't') as ctx:
ctx.parse(obj)
# make sure there is no 'a' property
self.assertRaises(AttributeError, lambda x: x.a, tmp['t'])
# make sure property 'b' exists
self.assertTrue(tmp['t'].b, 1)
def test_field_default_value(self):
""" field default value, make sure we won't reference to a global declared list_
"""
o1 = TObj(base.NullContext())
o2 = TObj(base.NullContext())
self.assertTrue(id(o1.a) != id(o2.a))
def test_merge(self):
""" test merge function """
class MergeObj(six.with_metaclass(base.FieldMeta, base.BaseObj)):
__swagger_fields__ = {
'ma': None,
'mb': None,
'mc': {},
'md': {},
'mf': [],
}
class MergeContext(base.Context):
__swagger_child__ = {
'ma': (None, TContext),
'mb': (None, TContext),
'mc': (base.ContainerType.dict_, TContext)
}
__swagger_ref_object__ = MergeObj
tmp = {'t': {}}
obj2 = {
'mb':{'a':[{}, {}, {}]},
'md':{'2': 2},
'mf':[2, 3]
}
obj1 = {
'ma':{'a':[{}, {}, {}, {}]},
'mb':{'a':[{}, {}]},
'mc':{'/a': {'a': [{}], 'b': {'bb': {}}, 'c': {'cc': [{}]}, 'd': {}}},
'md':{'1': 1},
'mf':[1, 2]
}
o3 = MergeObj(base.NullContext())
with MergeContext(tmp, 't') as ctx:
ctx.parse(obj1)
o1 = tmp['t']
with MergeContext(tmp, 't') as ctx:
ctx.parse(obj2)
o2 = tmp['t']
def _chk(o_from, o_to):
# existing children are not affected
self.assertTrue(len(o_to.mb.a), 3)
# non-existing children are fully copied
self.assertEqual(len(o_to.ma.a), 4)
self.assertNotEqual(id(o_to.ma), id(o_from.ma))
# make sure complex children are copied
self.assertNotEqual(id(o_to.mc), id(o_from.mc))
self.assertEqual(len(o_to.mc['/a'].a), 1)
self.assertTrue(isinstance(o_to.mc['/a'].b['bb'], ChildObj))
self.assertNotEqual(id(o_to.mc['/a'].b['bb']), id(o_from.mc['/a'].b['bb']))
self.assertTrue(isinstance(o_to.mc['/a'].c['cc'][0], ChildObj))
self.assertNotEqual(id(o_to.mc['/a'].c['cc'][0]), id(o_from.mc['/a'].c['cc'][0]))
self.assertTrue(o_to.mc['/a'].d, ChildObj)
self.assertNotEqual(id(o_to.mc['/a'].d), id(o1.mc['/a'].d))
self.assertEqual(o_to.md, {'1': 1, '2': 2})
self.assertEqual(sorted(o_to.mf), sorted([1, 2, 3]))
def _chk_parent(o_from, o_to):
for v in o_to.ma.a:
self.assertEqual(id(v._parent_), id(o_to.ma))
self.assertNotEqual(id(v._parent_), id(o_from.ma))
self.assertEqual(id(o_to.ma._parent_), id(o_to))
self.assertEqual(id(o_to.mb._parent_), id(o_to))
self.assertEqual(id(o_to.mc['/a']._parent_), id(o_to))
self.assertEqual(id(o_to.mc['/a'].a[0]._parent_), id(o_to.mc['/a']))
self.assertEqual(id(o_to.mc['/a'].b['bb']._parent_), id(o_to.mc['/a']))
self.assertEqual(id(o_to.mc['/a'].c['cc'][0]._parent_), id(o_to.mc['/a']))
self.assertEqual(o2.ma, None)
self.assertTrue(isinstance(o2.mb, TObj))
self.assertTrue(len(o2.mb.a), 3)
self.assertEqual(len(o2.mc), 0)
id_mb = id(o2.mb)
o2.merge(o1, MergeContext)
self.assertNotEqual(id(o2.mb), id(o1.mb))
self.assertEqual(id(o2.mb), id_mb)
# cascade merge
o3.merge(o2, MergeContext)
_chk(o1, o2)
_chk(o2, o3)
_chk(o1, o3)
_chk_parent(o1, o2)
_chk_parent(o2, o3)
_chk_parent(o1, o3)
def test_merge_exclude(self):
""" test 'exclude' in merge """
tmp = {'t': {}}
obj = {'a': [{}, {}, {}], 'b': {'/a': {}, '~b': {}, 'cc': {}}}
with TContext(tmp, 't') as ctx:
ctx.parse(obj)
o = tmp['t']
o1, o2 = TObj(base.NullContext()), TObj(base.NullContext())
o1.merge(o, TContext)
o2.merge(o, TContext, exclude=['b'])
self.assertEqual(len(o1.a), 3)
self.assertEqual(len(o2.a), 3)
self.assertEqual(len(o1.b), 3)
self.assertEqual(len(o2.b), 0)
def test_resolve(self):
""" test resolve function """
tmp = {'t': {}}
obj = {'a': [{}, {}, {}], 'b': {'/a': {}, '~b': {}, 'cc': {}}}
with TContext(tmp, 't') as ctx:
ctx.parse(obj)
o = tmp['t']
self.assertEqual(id(o.resolve('a')), id(o.a))
self.assertEqual(id(o.resolve(['a'])), id(o.resolve('a')))
self.assertEqual(id(o.resolve(['b', '/a'])), id(o.b['/a']))
def test_is_produced(self):
""" test is_produced function """
class ChildNotOkContext(base.Context):
__swagger_ref_object__ = ChildObj
@classmethod
def is_produced(kls, obj):
return False
class TestOkContext(base.Context):
__swagger_ref_object__ = TObj
__swagger_child__ = {
'a': (None, ChildContext)
}
class TestNotOkContext(base.Context):
__swagger_ref_object__ = TObj
__swagger_child__ = {
'a': (None, ChildNotOkContext)
}
tmp = {'t': {}}
obj = {'a': {}}
with TestOkContext(tmp, 't') as ctx:
# should not raise
ctx.parse(obj)
ctx = TestNotOkContext(tmp, 't')
try:
# simulate what ContextManager does
ctx.parse(obj)
ctx.__exit__(None, None, None)
except ValueError as e:
self.failUnlessEqual(e.args, ('Object is not instance of ChildObj but ChildObj',))
else:
self.fail('ValueError not raised')
def test_produce(self):
""" test produce function """
class TestBoolContext(base.Context):
__swagger_ref_object__ = TObj
__swagger_child__ = {
'a': (None, ChildContext),
}
def produce(self):
return True
tmp = {'t': {}}
obj = {'a': {}}
with TestBoolContext(tmp, 't') as ctx:
ctx.parse(obj)
self.assertTrue(isinstance(tmp['t'], bool))
self.assertEqual(tmp['t'], True)
def test_compare(self):
""" test compare """
tmp = {'t': {}}
obj = {
'a': [{'g': {'name':'Tom'}}, {'g': {'name': 'Kevin'}}],
'b': {
'bb': {},
'bbb': {'g': {'name': 'Owl'}}
},
'c': {
'cc': [
{'g': {'name':'Mary'}}
]
},
'd': {}
}
with TContext(tmp, 't') as ctx:
ctx.parse(obj)
obj1 = tmp['t']
# make sure comparing an object with itself reports equality
self.assertEqual((True, ''), obj1.compare(obj1))
# make sure diff in list would be captured
objt = copy.deepcopy(obj)
objt['a'][0]['g']['name'] = 'Tom1'
tmp = {'t': {}}
with TContext(tmp, 't') as ctx:
ctx.parse(objt)
obj2 = tmp['t']
self.assertEqual((False, 'a/0/g/name'), obj1.compare(obj2))
# make sure reordering items in a list is detected as a difference
objt = copy.deepcopy(obj)
objt['a'][0], objt['a'][1] = objt['a'][1], objt['a'][0]
tmp = {'t': {}}
with TContext(tmp, 't') as ctx:
ctx.parse(objt)
obj3 = tmp['t']
self.assertEqual((False, 'a/0/g/name'), obj1.compare(obj3))
# make sure diff in dict would be captured
objt = copy.deepcopy(obj)
objt['b']['bbb']['g']['name'] = 'Leo'
tmp = {'t': {}}
with TContext(tmp, 't') as ctx:
ctx.parse(objt)
obj4 = tmp['t']
self.assertEqual((False, 'b/bbb/g/name'), obj1.compare(obj4))
# make sure diff in dict of list would be captured
objt = copy.deepcopy(obj)
objt['c']['cc'][0]['g']['name'] = 'Celios'
tmp = {'t': {}}
with TContext(tmp, 't') as ctx:
ctx.parse(objt)
obj5 = tmp['t']
self.assertEqual((False, 'c/cc/0/g/name'), obj1.compare(obj5))
# make sure a key added to a dict is captured
objt = copy.deepcopy(obj)
objt['b']['bbbb'] = {'g': {'name': 'Leo'}}
tmp = {'t': {}}
with TContext(tmp, 't') as ctx:
ctx.parse(objt)
obj6 = tmp['t']
self.assertEqual((False, 'b/bbbb'), obj1.compare(obj6))
def test_inheritance(self):
""" test case for multiple layers of inheritance of BaseObj
"""
class A(six.with_metaclass(base.FieldMeta, base.BaseObj)):
__swagger_fields__ = {'a': None}
class B(six.with_metaclass(base.FieldMeta, A)):
__swagger_fields__ = {'b': None}
class C(six.with_metaclass(base.FieldMeta, B)):
__swagger_fields__ = {'c': None}
class D(six.with_metaclass(base.FieldMeta, C)):
__swagger_fields__ = {'d': None}
class Dx(base.Context):
__swagger_ref_object__ = D
obj = dict(a=1, b=2, c=3, d=4)
tmp = {'t': {}}
with Dx(tmp, 't') as ctx:
ctx.parse(obj)
d = tmp['t']
self.assertEqual(d.a, 1)
self.assertEqual(d.b, 2)
self.assertEqual(d.c, 3)
self.assertEqual(d.d, 4)
|
|
# -*- coding: utf-8 -*-
"""
gspread.models
~~~~~~~~~~~~~~
This module contains common spreadsheets' models
"""
import re
from collections import defaultdict
from itertools import chain
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from . import urlencode
from .ns import _ns, _ns1, ATOM_NS, BATCH_NS, SPREADSHEET_NS
from .urls import construct_url
from .utils import finditem, numericise_all
from .exceptions import IncorrectCellLabel, WorksheetNotFound, CellNotFound
try:
unicode
except NameError:
basestring = unicode = str
# Patch ElementTree._escape_attrib
_elementtree_escape_attrib = ElementTree._escape_attrib
def _escape_attrib(text, encoding=None, replace=None):
try:
text = _elementtree_escape_attrib(text)
except TypeError as e:
if str(e) == '_escape_attrib() takes exactly 2 arguments (1 given)':
text = _elementtree_escape_attrib(text, encoding)
entities = {'\n': ' ', '\r': ' ', '\t': '	'}
for key, value in entities.items():
text = text.replace(key, value)
return text
ElementTree._escape_attrib = _escape_attrib
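# Added commentary: the patch above does two things.  The try/except keeps the
# call compatible with ElementTree versions whose _escape_attrib() also takes
# an `encoding` argument, and the replacements afterwards preserve newlines,
# carriage returns and tabs inside attribute values as numeric character
# references (&#10;, &#13;, &#9;) instead of letting them collapse into plain
# whitespace.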
class Spreadsheet(object):
""" A class for a spreadsheet object."""
def __init__(self, client, feed_entry):
self.client = client
self._sheet_list = []
self._feed_entry = feed_entry
@property
def id(self):
return self._feed_entry.find(_ns('id')).text.split('/')[-1]
def get_id_fields(self):
return {'spreadsheet_id': self.id}
def _fetch_sheets(self):
feed = self.client.get_worksheets_feed(self)
for elem in feed.findall(_ns('entry')):
self._sheet_list.append(Worksheet(self, elem))
def add_worksheet(self, title, rows, cols):
"""Adds a new worksheet to a spreadsheet.
:param title: A title of a new worksheet.
:param rows: Number of rows.
:param cols: Number of columns.
Returns a newly created :class:`worksheet <Worksheet>`.
"""
feed = Element('entry', {'xmlns': ATOM_NS,
'xmlns:gs': SPREADSHEET_NS})
SubElement(feed, 'title').text = title
SubElement(feed, 'gs:rowCount').text = str(rows)
SubElement(feed, 'gs:colCount').text = str(cols)
url = construct_url('worksheets', self)
elem = self.client.post_feed(url, ElementTree.tostring(feed))
worksheet = Worksheet(self, elem)
self._sheet_list.append(worksheet)
return worksheet
def del_worksheet(self, worksheet):
"""Deletes a worksheet from a spreadsheet.
:param worksheet: The worksheet to be deleted.
"""
self.client.del_worksheet(worksheet)
self._sheet_list.remove(worksheet)
def worksheets(self):
"""Returns a list of all :class:`worksheets <Worksheet>`
in a spreadsheet.
"""
if not self._sheet_list:
self._fetch_sheets()
return self._sheet_list[:]
def worksheet(self, title):
"""Returns a worksheet with specified `title`.
The returning object is an instance of :class:`Worksheet`.
:param title: A title of a worksheet. If there're multiple
worksheets with the same title, first one will
be returned.
Example. Getting worksheet named 'Annual bonuses'
>>> sht = client.open('Sample one')
>>> worksheet = sht.worksheet('Annual bonuses')
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return finditem(lambda x: x.title == title, self._sheet_list)
except StopIteration:
raise WorksheetNotFound(title)
def get_worksheet(self, index):
"""Returns a worksheet with specified `index`.
The returning object is an instance of :class:`Worksheet`.
:param index: An index of a worksheet. Indexes start from zero.
Example. To get first worksheet of a spreadsheet:
>>> sht = client.open('My fancy spreadsheet')
>>> worksheet = sht.get_worksheet(0)
Returns `None` if the worksheet is not found.
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return self._sheet_list[index]
except IndexError:
return None
@property
def sheet1(self):
"""Shortcut property for getting the first worksheet."""
return self.get_worksheet(0)
@property
def title(self):
return self._feed_entry.find(_ns('title')).text
def __iter__(self):
for sheet in self.worksheets():
yield(sheet)
class Worksheet(object):
"""A class for worksheet object."""
def __init__(self, spreadsheet, element):
self.spreadsheet = spreadsheet
self.client = spreadsheet.client
self._id = element.find(_ns('id')).text.split('/')[-1]
self._title = element.find(_ns('title')).text
self._element = element
try:
self.version = self._get_link(
'edit', element).get('href').split('/')[-1]
except:
# not relevant for read-only spreadsheets
self.version = None
def __repr__(self):
return '<%s %s id:%s>' % (self.__class__.__name__,
repr(self.title),
self.id)
@property
def id(self):
"""Id of a worksheet."""
return self._id
@property
def title(self):
"""Title of a worksheet."""
return self._title
@property
def row_count(self):
"""Number of rows"""
return int(self._element.find(_ns1('rowCount')).text)
@property
def col_count(self):
"""Number of columns"""
return int(self._element.find(_ns1('colCount')).text)
@property
def updated(self):
"""Updated time in RFC 3339 format"""
return self._element.find(_ns('updated')).text
def get_id_fields(self):
return {'spreadsheet_id': self.spreadsheet.id,
'worksheet_id': self.id}
def _cell_addr(self, row, col):
return 'R%sC%s' % (row, col)
def _get_link(self, link_type, feed):
return finditem(lambda x: x.get('rel') == link_type,
feed.findall(_ns('link')))
def _fetch_cells(self):
feed = self.client.get_cells_feed(self)
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
_MAGIC_NUMBER = 64
_cell_addr_re = re.compile(r'([A-Za-z]+)(\d+)')
def get_int_addr(self, label):
"""Translates cell's label address to a tuple of integers.
The result is a tuple containing `row` and `column` numbers.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.get_int_addr('A1')
(1, 1)
"""
m = self._cell_addr_re.match(label)
if m:
column_label = m.group(1).upper()
row = int(m.group(2))
col = 0
for i, c in enumerate(reversed(column_label)):
col += (ord(c) - self._MAGIC_NUMBER) * (26 ** i)
else:
raise IncorrectCellLabel(label)
return (row, col)
def get_addr_int(self, row, col):
"""Translates cell's tuple of integers to a cell label.
The result is a string containing the cell's coordinates in label form.
:param row: The row of the cell to be converted.
Rows start at index 1.
:param col: The column of the cell to be converted.
Columns start at index 1.
Example:
>>> wks.get_addr_int(1, 1)
A1
"""
row = int(row)
col = int(col)
if row < 1 or col < 1:
raise IncorrectCellLabel('(%s, %s)' % (row, col))
div = col
column_label = ''
while div:
(div, mod) = divmod(div, 26)
if mod == 0:
mod = 26
div -= 1
column_label = chr(mod + self._MAGIC_NUMBER) + column_label
label = '%s%s' % (column_label, row)
return label
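# Added commentary: the loop above is a bijective base-26 conversion (there is
# no zero digit, hence the `mod == 0` adjustment).  A few worked values,
# assuming the algorithm as written:
#   col 1  -> divmod(1, 26)  = (0, 1)                 -> 'A'
#   col 26 -> divmod(26, 26) = (1, 0), mod=26, div=0  -> 'Z'
#   col 27 -> divmod(27, 26) = (1, 1) -> 'A', then (0, 1) -> 'A'  => 'AA'
#   col 28 -> divmod(28, 26) = (1, 2) -> 'B', then (0, 1) -> 'A'  => 'AB'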
def acell(self, label):
"""Returns an instance of a :class:`Cell`.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.acell('A1') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.cell(*(self.get_int_addr(label)))
def cell(self, row, col):
"""Returns an instance of a :class:`Cell` positioned in `row`
and `col` column.
:param row: Integer row number.
:param col: Integer column number.
Example:
>>> wks.cell(1, 1)
<Cell R1C1 "I'm cell A1">
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
return Cell(self, feed)
def range(self, alphanum):
"""Returns a list of :class:`Cell` objects from specified range.
:param alphanum: A string with range value in common format,
e.g. 'A1:A5'.
"""
feed = self.client.get_cells_feed(self, params={'range': alphanum,
'return-empty': 'true'})
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
def get_all_values(self):
"""Returns a list of lists containing all cells' values as strings."""
cells = self._fetch_cells()
# defaultdicts fill in gaps for empty rows/cells not returned by gdocs
rows = defaultdict(lambda: defaultdict(str))
for cell in cells:
row = rows.setdefault(int(cell.row), defaultdict(str))
row[cell.col] = cell.value
# we return a whole rectangular region worth of cells, including
# empties
if not rows:
return []
all_row_keys = chain.from_iterable(row.keys() for row in rows.values())
rect_cols = range(1, max(all_row_keys) + 1)
rect_rows = range(1, max(rows.keys()) + 1)
return [[rows[i][j] for j in rect_cols] for i in rect_rows]
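# Added commentary: because the cells feed only returns non-empty cells, the
# method above pads the result out to a full rectangle.  For example, if the
# only populated cells are A1='x' and C2='y', the returned value is
# [['x', '', ''], ['', '', 'y']].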
def get_all_records(self, empty2zero=False, head=1):
"""Returns a list of dictionaries, all of them having:
- the contents of the spreadsheet's head row as keys,
- the contents of a subsequent row of cells as values.
Cell values are numericised (strings that can be read as ints
or floats are converted).
:param empty2zero: determines whether empty cells are converted to zeros.
:param head: determines which row to use as keys, starting from 1
following the numeration of the spreadsheet."""
idx = head - 1
data = self.get_all_values()
keys = data[idx]
values = [numericise_all(row, empty2zero) for row in data[idx + 1:]]
return [dict(zip(keys, row)) for row in values]
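# Added commentary: a small worked example of the mapping above.  Assuming
# get_all_values() returns [['name', 'age'], ['Ann', '1'], ['Bob', '']] and
# head=1, the keys are ['name', 'age'] and the result is roughly
# [{'name': 'Ann', 'age': 1}, {'name': 'Bob', 'age': 0 or ''}], with the
# empty cell becoming 0 only when empty2zero is True.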
def row_values(self, row):
"""Returns a list of all values in a `row`.
Empty cells in this list will be rendered as :const:`None`.
"""
start_cell = self.get_addr_int(row, 1)
end_cell = self.get_addr_int(row, self.col_count)
row_cells = self.range('%s:%s' % (start_cell, end_cell))
return [cell.value for cell in row_cells]
def col_values(self, col):
"""Returns a list of all values in column `col`.
Empty cells in this list will be rendered as :const:`None`.
"""
start_cell = self.get_addr_int(1, col)
end_cell = self.get_addr_int(self.row_count, col)
row_cells = self.range('%s:%s' % (start_cell, end_cell))
return [cell.value for cell in row_cells]
def update_acell(self, label, val):
"""Sets the new value to a cell.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
:param val: New value.
Example:
>>> wks.update_acell('A1', '42') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.update_cell(*(self.get_int_addr(label)), val=val)
def update_cell(self, row, col, val):
"""Sets the new value to a cell.
:param row: Row number.
:param col: Column number.
:param val: New value.
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
cell_elem = feed.find(_ns1('cell'))
cell_elem.set('inputValue', unicode(val))
uri = self._get_link('edit', feed).get('href')
self.client.put_feed(uri, ElementTree.tostring(feed))
def _create_update_feed(self, cell_list):
feed = Element('feed', {'xmlns': ATOM_NS,
'xmlns:batch': BATCH_NS,
'xmlns:gs': SPREADSHEET_NS})
id_elem = SubElement(feed, 'id')
id_elem.text = construct_url('cells', self)
for cell in cell_list:
entry = SubElement(feed, 'entry')
SubElement(entry, 'batch:id').text = cell.element.find(
_ns('title')).text
SubElement(entry, 'batch:operation', {'type': 'update'})
SubElement(entry, 'id').text = cell.element.find(_ns('id')).text
edit_link = finditem(lambda x: x.get('rel') == 'edit',
cell.element.findall(_ns('link')))
SubElement(entry, 'link', {'rel': 'edit',
'type': edit_link.get('type'),
'href': edit_link.get('href')})
SubElement(entry, 'gs:cell', {'row': str(cell.row),
'col': str(cell.col),
'inputValue': unicode(cell.value)})
return feed
def update_cells(self, cell_list):
"""Updates cells in batch.
:param cell_list: List of a :class:`Cell` objects to update.
"""
feed = self._create_update_feed(cell_list)
self.client.post_cells(self, ElementTree.tostring(feed))
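# Added commentary: a minimal usage sketch of the batch update path, assuming
# `wks` is a Worksheet instance:
#
#     cells = wks.range('A1:B2')   # fetch a block of Cell objects
#     for cell in cells:
#         cell.value = 'x'         # change values locally
#     wks.update_cells(cells)      # push everything in one batch feed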
def resize(self, rows=None, cols=None):
"""Resizes the worksheet.
:param rows: New number of rows.
:param cols: New number of columns.
"""
if rows is None and cols is None:
raise TypeError("Either 'rows' or 'cols' should be specified.")
self_uri = self._get_link('self', self._element).get('href')
feed = self.client.get_feed(self_uri)
uri = self._get_link('edit', feed).get('href')
if rows:
elem = feed.find(_ns1('rowCount'))
elem.text = str(rows)
if cols:
elem = feed.find(_ns1('colCount'))
elem.text = str(cols)
# Send request and store result
self._element = self.client.put_feed(uri, ElementTree.tostring(feed))
def add_rows(self, rows):
"""Adds rows to worksheet.
:param rows: Rows number to add.
"""
self.resize(rows=self.row_count + rows)
def add_cols(self, cols):
"""Adds colums to worksheet.
:param cols: Columns number to add.
"""
self.resize(cols=self.col_count + cols)
def append_row(self, values):
"""Adds a row to the worksheet and populates it with values.
Widens the worksheet if there are more values than columns.
Note that a new Google Sheet has 100 or 1000 rows by default. You
may need to scroll down to find the new row.
:param values: List of values for the new row.
"""
self.add_rows(1)
new_row = self.row_count
data_width = len(values)
if self.col_count < data_width:
self.resize(cols=data_width)
cell_list = []
for i, value in enumerate(values, start=1):
cell = self.cell(new_row, i)
cell.value = value
cell_list.append(cell)
self.update_cells(cell_list)
def insert_row(self, values, index=1):
""""Adds a row to the worksheet at the specified index and populates it with values.
Widens the worksheet if there are more values than columns.
:param values: List of values for the new row.
"""
if index == self.row_count + 1:
return self.append_row(values)
elif index > self.row_count + 1:
raise IndexError('Row index out of range')
self.add_rows(1)
data_width = len(values)
if self.col_count < data_width:
self.resize(cols=data_width)
# Retrieve all Cells at or below `index` using a single batch query
top_left = self.get_addr_int(index, 1)
bottom_right = self.get_addr_int(self.row_count, self.col_count)
range_str = '%s:%s' % (top_left, bottom_right)
cells_after_insert = self.range(range_str)
for ind, cell in reversed(list(enumerate(cells_after_insert))):
if ind < self.col_count:
# For the first row, take the cell values from `values`
new_val = values[ind] if ind < len(values) else ''
else:
# For all other rows, take the cell values from the row above
new_val = cells_after_insert[ind - self.col_count].value
cell.value = new_val
self.update_cells(cells_after_insert)
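# Added commentary: the reversed() walk above shifts the block down in place.
# Cells in the newly inserted row take their values from `values` (or '' when
# `values` is shorter than the row), while every other cell copies the value
# of the cell one row above it (self.col_count positions earlier in the flat
# range); a single update_cells() call then writes the whole shifted block.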
def _finder(self, func, query):
cells = self._fetch_cells()
if isinstance(query, basestring):
match = lambda x: x.value == query
else:
match = lambda x: query.search(x.value)
return func(match, cells)
def find(self, query):
"""Finds first cell matching query.
:param query: A text string or compiled regular expression.
"""
try:
return self._finder(finditem, query)
except StopIteration:
raise CellNotFound(query)
def findall(self, query):
"""Finds all cells matching query.
:param query: A text string or compiled regular expression.
"""
return self._finder(filter, query)
def export(self, format='csv'):
"""Export the worksheet in specified format.
:param format: A format of the output.
"""
export_link = self._get_link(
'http://schemas.google.com/spreadsheets/2006#exportcsv',
self._element).get('href')
url, qs = export_link.split('?')
params = dict(param.split('=') for param in qs.split('&'))
params['format'] = format
params = urlencode(params)
export_link = '%s?%s' % (url, params)
return self.client.session.get(export_link)
class Cell(object):
"""An instance of this class represents a single cell
in a :class:`worksheet <Worksheet>`.
"""
def __init__(self, worksheet, element):
self.element = element
cell_elem = element.find(_ns1('cell'))
self._row = int(cell_elem.get('row'))
self._col = int(cell_elem.get('col'))
self.input_value = cell_elem.get('inputValue')
numeric_value = cell_elem.get('numericValue')
self.numeric_value = float(numeric_value) if numeric_value else None
#: Value of the cell.
self.value = cell_elem.text or ''
@property
def row(self):
"""Row number of the cell."""
return self._row
@property
def col(self):
"""Column number of the cell."""
return self._col
def __repr__(self):
return '<%s R%sC%s %s>' % (self.__class__.__name__,
self.row,
self.col,
repr(self.value))
|
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase import TTLocalizer
from toontown.parties import PartyGlobals
from toontown.hood import *
import Fireworks
import FireworkShows
from FireworkGlobals import skyTransitionDuration, preShowPauseDuration, postShowPauseDuration, preNormalMusicPauseDuration
from toontown.effects.FireworkShow import FireworkShow
class FireworkShowMixin:
notify = DirectNotifyGlobal.directNotify.newCategory('FireworkShowMixin')
def __init__(self, restorePlaygroundMusic = True, startDelay = 0.0):
self.currentShow = None
self.restorePlaygroundMusic = restorePlaygroundMusic
self.startDelay = startDelay
self.timestamp = None
self.fireworkShow = None
self.eventId = JULY4_FIREWORKS
self.accept('MusicEnabled', self.startMusic)
return
def disable(self):
if self.currentShow:
self.currentShow.pause()
self.currentShow = None
if base.cr.config.GetBool('want-old-fireworks', 0):
ivalMgr.finishIntervalsMatching('shootFirework*')
else:
self.destroyFireworkShow()
from toontown.hood import DDHood
if isinstance(self.getHood(), DDHood.DDHood):
self.getHood().whiteFogColor = Vec4(0.8, 0.8, 0.8, 1)
self.restoreCameraLens()
if hasattr(self.getHood(), 'loader'):
self.getGeom().clearColorScale()
if hasattr(self.getHood(), 'sky'):
self.getSky().show()
self.getSky().clearColorScale()
if hasattr(base, 'localAvatar') and base.localAvatar:
base.localAvatar.clearColorScale()
base.setBackgroundColor(DefaultBackgroundColor)
self.ignoreAll()
return
def startMusic(self):
if self.timestamp:
self.getLoader().music.stop()
t = globalClockDelta.localElapsedTime(self.timestamp) - self.startDelay
base.playMusic(self.showMusic, 0, 1, 1, max(0, t))
def shootFirework(self, x, y, z, style, color1, color2):
amp = 5
Fireworks.shootFirework(style, x, y, z, color1, color2, amp)
def startShow(self, eventId, style, songId, timestamp, root = render):
t = globalClockDelta.localElapsedTime(timestamp) - self.startDelay
self.timestamp = timestamp
self.showMusic = None
self.eventId = eventId
if base.config.GetBool('want-old-fireworks', 0):
self.currentShow = self.getFireworkShowIval(eventId, style, songId, t)
if self.currentShow:
self.currentShow.start(t)
else:
self.createFireworkShow()
if t > self.fireworkShow.getShowDuration():
return
preShow = self.preShow(eventId, songId, t)
postShow = self.postShow(eventId)
beginFireworkShow = Func(self.beginFireworkShow, max(0, t), root)
self.currentShow = Sequence(preShow, beginFireworkShow, Wait(max(0, self.fireworkShow.getShowDuration() - max(0, t))), postShow)
self.currentShow.start()
return
def preShow(self, eventId, songId, startT):
if eventId == JULY4_FIREWORKS:
instructionMessage = TTLocalizer.FireworksInstructions
startMessage = TTLocalizer.FireworksJuly4Beginning
endMessage = TTLocalizer.FireworksJuly4Ending
songs = ['tt_summer', 'firework_music']
musicFile = 'phase_4/audio/bgm/%s.ogg' % songs[songId]
elif eventId == NEWYEARS_FIREWORKS:
instructionMessage = TTLocalizer.FireworksInstructions
startMessage = TTLocalizer.FireworksNewYearsEveBeginning
endMessage = TTLocalizer.FireworksNewYearsEveEnding
songs = ['new_years_fireworks_music', 'tt_s_ara_gen_fireworks_auldLangSyne']
musicFile = 'phase_4/audio/bgm/%s.ogg' % songs[songId]
elif eventId == PartyGlobals.FireworkShows.Summer:
instructionMessage = TTLocalizer.FireworksActivityInstructions
startMessage = TTLocalizer.FireworksActivityBeginning
endMessage = TTLocalizer.FireworksActivityEnding
songs = ['tt_party1', 'tt_party2']
musicFile = 'phase_4/audio/bgm/%s.ogg' % songs[songId]
elif eventId == COMBO_FIREWORKS:
instructionMessage = TTLocalizer.FireworksInstructions
startMessage = TTLocalizer.FireworksComboBeginning
endMessage = TTLocalizer.FireworksComboEnding
songs = ['new_years_fireworks_music', 'tt_s_ara_gen_fireworks_auldLangSyne']
musicFile = 'phase_4/audio/bgm/%s.ogg' % songs[songId]
else:
FireworkShowMixin.notify.warning('Invalid fireworks event ID: %d' % eventId)
return None
self.showMusic = loader.loadMusic(musicFile)
self.showMusic.setVolume(1)
def __lightDecorationOn__():
place = base.cr.playGame.getPlace()
if place is None:
return
if hasattr(place, 'halloweenLights'):
if not self.__checkStreetValidity():
return
else:
place.halloweenLights = base.cr.playGame.getPlace().loader.geom.findAllMatches('**/*light*')
place.halloweenLights.extend(base.cr.playGame.getPlace().loader.geom.findAllMatches('**/*lamp*'))
for light in place.halloweenLights:
light.setColorScaleOff(0)
elif not self.__checkHoodValidity():
return
else:
place.loader.hood.halloweenLights = base.cr.playGame.hood.loader.geom.findAllMatches('**/*light*')
place.loader.hood.halloweenLights.extend(base.cr.playGame.hood.loader.geom.findAllMatches('**/*lamp*'))
for light in base.cr.playGame.hood.halloweenLights:
light.setColorScaleOff(0)
if self.fireworkShow and not self.fireworkShow.isEmpty():
self.fireworkShow.setColorScaleOff(0)
return
def restoreCameraLens(self):
hood = self.getHood()
if hood != None:
if hood.id == GoofySpeedway or hood.id == OutdoorZone:
base.camLens.setFar(SpeedwayCameraFar)
else:
base.camLens.setFar(DefaultCameraFar)
def postShow(self, eventId):
if eventId == JULY4_FIREWORKS:
endMessage = TTLocalizer.FireworksJuly4Ending
elif eventId == NEWYEARS_FIREWORKS:
endMessage = TTLocalizer.FireworksNewYearsEveEnding
elif eventId == PartyGlobals.FireworkShows.Summer:
endMessage = TTLocalizer.FireworksActivityEnding
elif eventId == COMBO_FIREWORKS:
endMessage = TTLocalizer.FireworksComboEnding
else:
FireworkShowMixin.notify.warning('Invalid fireworks event ID: %d' % eventId)
return None
if self.__checkHoodValidity() and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
postShow = Sequence(Func(base.cr.playGame.hood.sky.show), Parallel(LerpColorScaleInterval(base.cr.playGame.hood.sky, 2.5, Vec4(1, 1, 1, 1)), LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 2.5, Vec4(1, 1, 1, 1)), LerpColorScaleInterval(base.localAvatar, 2.5, Vec4(1, 1, 1, 1))), Func(self.__restoreDDFog), Func(self.restoreCameraLens), Func(base.setBackgroundColor, DefaultBackgroundColor), Func(self.showMusic.stop), Func(base.localAvatar.setSystemMessage, 0, endMessage))
if self.restorePlaygroundMusic:
postShow.append(Wait(2.0))
postShow.append(Func(base.playMusic, self.getLoader().music, 1, 1, 0.8))
return postShow
def createFireworkShow(self):
if not self.fireworkShow:
self.fireworkShow = FireworkShow(self.eventId)
def destroyFireworkShow(self):
if self.fireworkShow:
self.fireworkShow.cleanupShow()
self.fireworkShow = None
return
def beginFireworkShow(self, timeStamp, root):
if self.fireworkShow and not self.fireworkShow.isPlaying():
self.fireworkShow.begin(timeStamp)
self.fireworkShow.reparentTo(root)
hood = self.getHood()
if isinstance(hood, TTHood.TTHood):
self.fireworkShow.setPos(150, 0, 80)
self.fireworkShow.setHpr(90, 0, 0)
elif isinstance(hood, BRHood.BRHood):
self.fireworkShow.setPos(-200, -60, 50)
self.fireworkShow.setHpr(270, 0, 0)
elif isinstance(hood, MMHood.MMHood):
self.fireworkShow.setPos(150, -25, 40)
self.fireworkShow.setHpr(90, 0, 0)
elif isinstance(hood, DGHood.DGHood):
self.fireworkShow.setPos(-80, -50, 60)
self.fireworkShow.setHpr(0, 0, 0)
elif isinstance(hood, DLHood.DLHood):
self.fireworkShow.setPos(-160, 0, 80)
self.fireworkShow.setHpr(270, 0, 0)
elif isinstance(hood, GSHood.GSHood):
self.fireworkShow.setPos(60, -350, 80)
self.fireworkShow.setHpr(20, 0, 0)
elif isinstance(hood, DDHood.DDHood):
self.fireworkShow.setPos(150, 0, 50)
self.fireworkShow.setHpr(90, 0, 0)
elif isinstance(hood, OZHood.OZHood):
self.fireworkShow.setPos(-450, -80, 140)
self.fireworkShow.setHpr(300, 0, 0)
elif isinstance(hood, PartyHood.PartyHood):
self.fireworkShow.setPos(0, -400, 120)
self.fireworkShow.lookAt(0, 0, 0)
self.fireworkShow.setScale(1.8)
def getFireworkShowIval(self, eventId, index, songId, startT):
show = FireworkShows.getShow(eventId, index)
if show is None:
FireworkShowMixin.notify.warning('could not find firework show: index: %s' % index)
return
preShow = self.preShow(eventId, songId, startT)
mainShow = Sequence()
currentT = skyTransitionDuration + preShowPauseDuration
for effect in show:
waitTime, style, colorIndex1, colorIndex2, amp, x, y, z = effect
if waitTime > 0:
currentT += waitTime
mainShow.append(Wait(waitTime))
if currentT >= startT:
mainShow.append(Func(Fireworks.shootFirework, style, x, y, z, colorIndex1, colorIndex2, amp))
postShow = self.postShow(eventId)
return Sequence(preShow, mainShow, postShow)
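# Added commentary: `currentT` accumulates the scheduled time of each effect
# while the interval is built, so effects whose time falls before `startT`
# are not appended; a client that joins late therefore plays only the
# remainder of the show rather than restarting it from the beginning.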
def clearMyColorScales(self):
if self.getGeom() and not self.getGeom().isEmpty():
self.getGeom().clearColorScale()
if self.getSky() and not self.getSky().isEmpty():
self.getSky().clearColorScale()
def getLoader(self):
if base.cr.playGame.hood != None:
return base.cr.playGame.hood.loader
return
def getHood(self):
if base.cr.playGame.hood != None:
return base.cr.playGame.hood
return
def getGeom(self):
loader = self.getLoader()
if loader:
return loader.geom
return None
def getSky(self):
hood = self.getHood()
if hood:
return hood.sky
return None
def __checkDDFog(self):
from toontown.hood import DDHood
if isinstance(self.getHood(), DDHood.DDHood):
self.getHood().whiteFogColor = Vec4(0.2, 0.2, 0.2, 1)
if hasattr(base.cr.playGame.getPlace(), 'cameraSubmerged'):
if not base.cr.playGame.getPlace().cameraSubmerged:
self.getHood().setWhiteFog()
def __restoreDDFog(self):
from toontown.hood import DDHood
if isinstance(self.getHood(), DDHood.DDHood):
self.getHood().whiteFogColor = Vec4(0.8, 0.8, 0.8, 1)
if hasattr(base.cr.playGame.getPlace(), 'cameraSubmerged'):
if not base.cr.playGame.getPlace().cameraSubmerged:
self.getHood().setWhiteFog()
def __checkStreetValidity(self):
if (hasattr(base.cr.playGame, 'getPlace') and base.cr.playGame.getPlace()
        and hasattr(base.cr.playGame.getPlace(), 'loader') and base.cr.playGame.getPlace().loader
        and hasattr(base.cr.playGame.getPlace().loader, 'geom') and base.cr.playGame.getPlace().loader.geom):
return True
else:
return False
def __checkHoodValidity(self):
if (hasattr(base.cr.playGame, 'hood') and base.cr.playGame.hood
        and hasattr(base.cr.playGame.hood, 'loader') and base.cr.playGame.hood.loader
        and hasattr(base.cr.playGame.hood.loader, 'geom') and base.cr.playGame.hood.loader.geom):
return True
else:
return False
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, Directory
from resource_management.libraries.script import Script
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.security_commons import build_expectations
from resource_management.libraries.functions.security_commons import cached_kinit_executor
from resource_management.libraries.functions.security_commons import get_params_from_filesystem
from resource_management.libraries.functions.security_commons import validate_security_config_properties
from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
from resource_management.core.resources.system import File
from hive import hive
from hive import jdbc_connector
from hive_service import hive_service
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons import OSConst
# the legacy conf.server location in previous stack versions
LEGACY_HIVE_SERVER_CONF = "/etc/hive/conf.server"
class HiveMetastore(Script):
def install(self, env):
import params
self.install_packages(env)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
# writing configurations on start is required for security
self.configure(env)
hive_service('metastore', action='start', upgrade_type=upgrade_type)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
hive_service('metastore', action='stop', upgrade_type=upgrade_type)
def configure(self, env):
import params
env.set_params(params)
hive(name = 'metastore')
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HiveMetastoreWindows(HiveMetastore):
def status(self, env):
import status_params
from resource_management.libraries.functions import check_windows_service_status
check_windows_service_status(status_params.hive_metastore_win_service_name)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HiveMetastoreDefault(HiveMetastore):
def get_component_name(self):
return "hive-metastore"
def status(self, env):
import status_params
from resource_management.libraries.functions import check_process_status
env.set_params(status_params)
pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
# Check the Hive Metastore process status using its PID file
check_process_status(pid_file)
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Metastore Stack Upgrade pre-restart")
import params
env.set_params(params)
is_upgrade = params.upgrade_direction == Direction.UPGRADE
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
conf_select.select(params.stack_name, "hive", params.version)
stack_select.select("hive-metastore", params.version)
if is_upgrade and params.stack_version_formatted_major and \
check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
self.upgrade_schema(env)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
props_value_check = {"hive.server2.authentication": "KERBEROS",
"hive.metastore.sasl.enabled": "true",
"hive.security.authorization.enabled": "true"}
props_empty_check = ["hive.metastore.kerberos.keytab.file",
"hive.metastore.kerberos.principal"]
props_read_check = ["hive.metastore.kerberos.keytab.file"]
hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
props_read_check)
hive_expectations ={}
hive_expectations.update(hive_site_props)
security_params = get_params_from_filesystem(status_params.hive_conf_dir,
{'hive-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, hive_expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if 'hive-site' not in security_params \
or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
security_params['hive-site']['hive.metastore.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
def upgrade_schema(self, env):
"""
Executes the schema upgrade binary. This is its own function because it could
be called as a standalone task from the upgrade pack, but it is safe to run for each
metastore instance. The schema upgrade on an already upgraded metastore is a NOOP.
The metastore schema upgrade requires a database driver library for most
databases. During an upgrade, it's possible that the library is not present,
so this will also attempt to copy/download the appropriate driver.
This function will also ensure that configurations are written out to disk before running
since the new configs will most likely not yet exist on an upgrade.
Should not be invoked for a DOWNGRADE; Metastore only supports schema upgrades.
"""
Logger.info("Upgrading Hive Metastore Schema")
import status_params
import params
env.set_params(params)
# ensure that configurations are written out before trying to upgrade the schema
# since the schematool needs configs and doesn't know how to use the hive conf override
self.configure(env)
if params.security_enabled:
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
params.hive_metastore_keytab_path,
params.hive_metastore_principal,
status_params.hostname,
status_params.tmp_dir)
# ensure that the JDBC driver is present for the schema tool; if it's not
# present, then download it first
if params.hive_jdbc_driver in params.hive_jdbc_drivers_list:
target_directory = format("{stack_root}/{version}/hive/lib")
# download it if it does not exist
if not os.path.exists(params.source_jdbc_file):
jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
target_directory_and_filename = os.path.join(target_directory, os.path.basename(params.source_jdbc_file))
if params.sqla_db_used:
target_native_libs_directory = format("{target_directory}/native/lib64")
Execute(format("yes | {sudo} cp {jars_in_hive_lib} {target_directory}"))
Directory(target_native_libs_directory, create_parents = True)
Execute(format("yes | {sudo} cp {libs_in_hive_lib} {target_native_libs_directory}"))
Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
else:
# copy the JDBC driver from the older metastore location to the new location only
# if it does not already exist
if not os.path.exists(target_directory_and_filename):
Execute(('cp', params.source_jdbc_file, target_directory),
path=["/bin", "/usr/bin/"], sudo = True)
File(target_directory_and_filename, mode = 0644)
# build the schema tool command
binary = format("{hive_schematool_ver_bin}/schematool")
# the conf.server directory changed locations between stack versions
# since the configurations have not been written out yet during an upgrade
# we need to choose the original legacy location
schematool_hive_server_conf_dir = params.hive_server_conf_dir
if not(check_stack_feature(StackFeature.CONFIG_VERSIONING, params.version_for_stack_feature_checks)):
schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF
env_dict = {
'HIVE_CONF_DIR': schematool_hive_server_conf_dir
}
command = format("{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
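# Illustrative only (actual values come from params): the rendered command
# looks roughly like
#   <hive_schematool_ver_bin>/schematool -dbType mysql -upgradeSchema
# where "mysql" stands in for whatever hive_metastore_db_type is configured.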
Execute(command, user=params.hive_user, tries=1, environment=env_dict, logoutput=True)
def get_log_folder(self):
import params
return params.hive_log_dir
def get_user(self):
import params
return params.hive_user
if __name__ == "__main__":
HiveMetastore().execute()
|
|
import os
import shutil
import platform
import pandas as pd
import matplotlib.pyplot as plt
import flopy
import pyemu
model_ws = os.path.join("extra_crispy")
nam_file = "freyberg.nam"
ml = flopy.modflow.Modflow.load(nam_file,exe_name="mf2005",model_ws=model_ws,verbose=True)
ml.dis.sr.xul = 619653
ml.dis.sr.yul = 3353277
ml.dis.sr.rotation = 0
ml.dis.epsg_str = "EPSG:32614"
ml.dis.start_datetime = "11-5-1955"
#write a grid spec file
ml.dis.sr.write_gridSpec(os.path.join("misc","freyberg.spc"))
# write the bore coords file
obs_rowcol = pd.read_csv(os.path.join("misc","obs_rowcol.dat"),delim_whitespace=True)
obs_rowcol.loc[:,'x'] = ml.dis.sr.xcentergrid[obs_rowcol.row-1,obs_rowcol.col-1]
obs_rowcol.loc[:,'y'] = ml.dis.sr.ycentergrid[obs_rowcol.row-1,obs_rowcol.col-1]
obs_rowcol.loc[:,"top"] = ml.dis.top[obs_rowcol.row-1,obs_rowcol.col-1]
obs_rowcol.loc[:,"layer"] = 1
# use this later to set weights
obs_names = ["or{0:02d}c{1:02d}_0".format(r-1,c-1) for r,c in zip(obs_rowcol.row,obs_rowcol.col)]
# get the truth time series
h = flopy.utils.HeadFile(os.path.join(model_ws,"freyberg.hds"),model=ml)
data = h.get_alldata()
#write all those terrible mod2obs files
ibound = ml.bas6.ibound.array
#well_data = ml.wel.stress_period_data[0]
#ibound[0,well_data["i"],well_data['j']] = 0
#drn_data = ml.riv.stress_period_data[0]
#ibound[0,drn_data["i"],drn_data['j']] = 0
f_crd = open(os.path.join("misc","bore.crds"),'w')
for i in range(ml.nrow):
for j in range(ml.ncol):
if ibound[0,i,j] == 0:
continue
on = "or{0:02d}c{1:02d}".format(i,j)
ox = ml.dis.sr.xcentergrid[i,j]
oy = ml.dis.sr.ycentergrid[i,j]
ol = 1
f_crd.write("{0:20s} {1:15.6E} {2:15.6E} {3:d}\n".format(on,ox,oy,ol))
f_crd.close()
# run mod2smp to get the truth values
with open(os.path.join("settings.fig"),'w') as f:
f.write("date=dd/mm/yyyy\ncolrow=no")
with open(os.path.join("misc","mod2smp.in"),'w') as f:
f.write(os.path.join("misc","freyberg.spc")+'\n')
f.write(os.path.join("misc","bore.crds")+'\n')
f.write(os.path.join("misc","bore.crds")+'\n')
f.write(os.path.join("extra_crispy","freyberg.hds")+'\n')
f.write("f\n5\n1.0e+30\nd\n")
f.write("01/01/2015\n00:00:00\n")
f.write(os.path.join("misc","freyberg_heads.smp")+'\n')
os.system(os.path.join("exe","mod2smp.exe") + " <"+os.path.join("misc","mod2smp.in"))
# write the ins file for the head smp
pyemu.pst_utils.smp_to_ins(os.path.join("misc","freyberg_heads.smp"))
shutil.copy2(os.path.join("misc","freyberg_heads.smp"),os.path.join("misc","freyberg_heads_truth.smp"))
# write the hk template
pnames = []
with open(os.path.join("misc","hk_Layer_1.ref.tpl"),'w') as f:
f.write("ptf ~\n")
for i in range(ml.nrow):
for j in range(ml.ncol):
#print(i,j,ibound[0,i,j])
if ibound[0,i,j] == 0:
tpl_str = " 0.000000E+00"
else:
pn = "hkr{0:02d}c{1:02d}".format(i,j)
tpl_str = "~ {0:8s} ~".format(pn)
f.write("{0:14s} ".format(tpl_str))
pnames.append(pn)
f.write('\n')
# build pst instance
misc_files = os.listdir(os.path.join("misc"))
ins_files = [os.path.join("misc",f) for f in misc_files if f.endswith(".ins")]
out_files = [f.replace(".ins",'') for f in ins_files]
tpl_files = [os.path.join("misc",f) for f in misc_files if f.endswith(".tpl")]
in_files = [os.path.join(ml.model_ws,os.path.split(f)[-1]).replace(".tpl",'') for f in tpl_files]
in_files = [os.path.join(ml.model_ws,"ref",os.path.split(f)[-1]) if "layer" in f.lower() else f for f in in_files]
pst = pyemu.pst_utils.pst_from_io_files(tpl_files,in_files,ins_files,out_files)
# apply par values and bounds and groups
pdata = pst.parameter_data
grps = pdata.groupby(pdata.parnme.apply(lambda x:'hk' in x)).groups
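# .groups on this boolean grouping yields a dict-like mapping: True -> index
# labels (parameter names) containing 'hk', False -> all other parameters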
hk_mean = ml.upw.hk.array.mean()
hk_stdev = ml.upw.hk.array.std()
lb = hk_mean * 0.1
ub = hk_mean * 10.0
pdata.loc[grps[True],"parval1"] = hk_mean
pdata.loc[grps[True],"parubnd"] = ub
pdata.loc[grps[True],"parlbnd"] = lb
pdata.loc[grps[True],"pargp"] = "hk"
# constant mults
grps = pdata.groupby(pdata.parnme.apply(lambda x:'rch' in x)).groups
pdata.loc[grps[True],"parval1"] = 1.0
pdata.loc[grps[True],"parubnd"] = 1.5
pdata.loc[grps[True],"parlbnd"] = 0.5
pdata.loc[grps[True],"pargp"] = "rch"
pdata.loc["rch_1","parval1"] = 1.0
pdata.loc["rch_1","parubnd"] = 1.1
pdata.loc["rch_1","parlbnd"] = 0.9
rcond_mean = ml.riv.stress_period_data[0]["cond"].mean()
rcond_std = ml.riv.stress_period_data[0]["cond"].std()
rcond_lb = rcond_mean * 0.1
rcond_ub = rcond_mean * 10.0
grps = pdata.groupby(pdata.parnme.apply(lambda x:'rcond' in x)).groups
pdata.loc[grps[True],"parval1"] = rcond_mean
pdata.loc[grps[True],"parubnd"] = rcond_ub
pdata.loc[grps[True],"parlbnd"] = rcond_lb
pdata.loc[grps[True],"pargp"] = "rcond"
wf_base = ml.wel.stress_period_data[0]["flux"]
wf_fore = ml.wel.stress_period_data[1]["flux"]
# grps = pdata.groupby(pdata.parnme.apply(lambda x:'wf' in x)).groups
# pdata.loc[grps[True],"parval1"] = 1.0
# pdata.loc[grps[True],"parubnd"] = 1.5
# pdata.loc[grps[True],"parlbnd"] = 0.5
# pdata.loc[grps[True],"pargp"] = "welflux"
grps = pdata.groupby(pdata.parnme.apply(lambda x:'wf' in x and x.endswith("_1"))).groups
pdata.loc[grps[True],"parval1"] = -1.0 * wf_base
pdata.loc[grps[True],"parubnd"] = -1.0 * wf_base * 1.1
pdata.loc[grps[True],"parlbnd"] = -1.0 * wf_base * 0.9
pdata.loc[grps[True],"scale"] = -1.0
pdata.loc[grps[True],"pargp"] = "welflux"
grps = pdata.groupby(pdata.parnme.apply(lambda x:'wf' in x and x.endswith("_2"))).groups
pdata.loc[grps[True],"parval1"] = -1.0 * wf_fore
pdata.loc[grps[True],"parubnd"] = -1.0 * wf_fore * 1.5
pdata.loc[grps[True],"parlbnd"] = -1.0 * wf_fore * 0.5
pdata.loc[grps[True],"scale"] = -1.0
pdata.loc[grps[True],"pargp"] = "welflux"
pdata.loc["ss","parval1"] = ml.upw.ss.array.mean()
pdata.loc["ss","parubnd"] = ml.upw.ss.array.mean() * 10.0
pdata.loc["ss","parlbnd"] = ml.upw.ss.array.mean() * 0.1
pdata.loc["ss","pargp"] = "storage"
pdata.loc["sy","parval1"] = ml.upw.sy.array.mean()
pdata.loc["sy","parubnd"] = ml.upw.sy.array.mean() * 10.0
pdata.loc["sy","parlbnd"] = ml.upw.sy.array.mean() * 0.1
pdata.loc["sy","pargp"] = "storage"
#apply obs weights and groups and values
import run
run.process()
run.write_other_obs_ins()
shutil.copy2(os.path.join("misc","other.obs"),os.path.join("misc","other.obs.truth"))
smp = pyemu.pst_utils.smp_to_dataframe(os.path.join("misc","freyberg_heads_truth.smp"))
values = list(smp.loc[:,"value"])
pst.observation_data.loc[:,"weight"] = 0.0
pst.observation_data.loc[:,"obgnme"] = "forecast"
groups = pst.observation_data.groupby(pst.observation_data.obsnme.apply(lambda x:x in obs_names)).groups
pst.observation_data.loc[groups[True],"weight"] = 100.0
pst.observation_data.loc[groups[True],"obgnme"] = "head_cal"
groups = pst.observation_data.groupby(pst.observation_data.obsnme.apply(lambda x:x.startswith('o'))).groups
pst.observation_data.loc[groups[True],"obsval"] = values
pst.observation_data.index = pst.observation_data.obsnme
with open(os.path.join("misc","other.obs.truth"),'r') as f:
for line in f:
raw = line.strip().split()
pst.observation_data.loc[raw[0],"obsval"] = float(raw[1])
pst.model_command[0] = "python run.py"
pst.zero_order_tikhonov()
pst.control_data.noptmax = 20
pst.pestpp_lines.append('++forecasts(travel_time,sw_gw_0,sw_gw_1,sw_gw_2)')
pst.pestpp_lines.append('++n_iter_base(1)')
pst.pestpp_lines.append('++n_iter_super(4)')
pst.pestpp_lines.append('++max_reg_iter(5)')
pst.write("freyberg.pst",update_regul=True)
if platform.system().lower() == "windows":
pest_exe = os.path.join("exe","pest++.exe")
else:
pest_exe = None
# only launch PEST++ when a bundled binary exists for this platform; the
# original unconditional call would fail on non-Windows systems
if pest_exe is not None:
os.system(pest_exe + ' freyberg.pst /h :4004')
# dt_deltas = pd.to_timedelta(h.get_times(),unit="d")
# idx = pd.to_datetime(ml.dis.start_datetime) + dt_deltas
# obs_data = pd.DataFrame(data[:,0,obs_rowcol.row-1,obs_rowcol.col-1],columns=obs_rowcol.name,
# index=idx)
#
# print(obs_data.shape)
# obs_rowcol.index = obs_rowcol.name
# for name in obs_data.columns:
# top = obs_rowcol.loc[name,"top"]
# if obs_data.loc[:,name].max() > top:
# print(name,"flooded")
# fig = plt.figure()
# ax = plt.subplot(111)
# obs_data.loc[:,name].plot(ax=ax,legend=False,marker='.')
#
# ax.plot(ax.get_xlim(),[top,top],"k--")
# ax.set_title(name)
# plt.show()
# fig = plt.figure()
# ax = plt.subplot(111)
# ax = ml.wel.stress_period_data.plot(ax=ax)
# ax = ml.riv.stress_period_data.plot(ax=ax)
# ax.scatter(obs_rowcol.x,obs_rowcol.y)
# [ax.text(x,y,name) for x,y,name in zip(obs_rowcol.x,obs_rowcol.y,obs_rowcol.name)]
# ax = ml.wel.plot()[0]
# ax.scatter(obs_rowcol.x,obs_rowcol.y)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' Unit tests for Gluon Estimator '''
import sys
import unittest
import warnings
import pytest
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.gluon.contrib.estimator import *
from mxnet.gluon.contrib.estimator.event_handler import *
def _get_test_network(params=None):
net = nn.Sequential(params=params)
net.add(nn.Dense(4, activation='relu', flatten=False))
return net
def _get_test_network_with_namescope(params=None):
net = nn.Sequential(params=params)
with net.name_scope():
net.add(nn.Dense(4, activation='relu', flatten=False))
return net
def _get_test_data():
batch_size = 4
in_data = mx.nd.random.uniform(shape=(10, 3))
out_data = mx.nd.random.uniform(shape=(10, 4))
# Input dataloader
dataset = gluon.data.dataset.ArrayDataset(in_data, out_data)
dataloader = gluon.data.DataLoader(dataset, batch_size=batch_size)
dataiter = mx.io.NDArrayIter(data=in_data, label=out_data, batch_size=batch_size)
return dataloader, dataiter
def test_fit():
''' test estimator with different train data types '''
net = _get_test_network()
dataloader, dataiter = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
loss = gluon.loss.L2Loss()
acc = mx.gluon.metric.Accuracy()
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
trainer=trainer,
context=ctx)
est.fit(train_data=dataloader,
epochs=num_epochs)
with pytest.raises(ValueError):
est.fit(train_data=dataiter,
epochs=num_epochs)
# Input NDArray
with pytest.raises(ValueError):
est.fit(train_data=[mx.nd.ones(shape=(10, 3))],
epochs=num_epochs)
def test_validation():
''' test different validation data types'''
net = _get_test_network()
dataloader, dataiter = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
loss = gluon.loss.L2Loss()
acc = mx.gluon.metric.Accuracy()
val_loss = gluon.loss.L1Loss()
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
trainer=trainer,
context=ctx,
val_loss=val_loss)
# Input dataloader
est.fit(train_data=dataloader,
val_data=dataloader,
epochs=num_epochs)
# using validation handler
train_metrics = est.train_metrics
val_metrics = est.val_metrics
validation_handler = ValidationHandler(val_data=dataloader, eval_fn=est.evaluate)
with pytest.raises(ValueError):
est.fit(train_data=dataiter,
val_data=dataiter,
epochs=num_epochs)
# Input NDArray
with pytest.raises(ValueError):
est.fit(train_data=[mx.nd.ones(shape=(10, 3))],
val_data=[mx.nd.ones(shape=(10, 3))],
epochs=num_epochs)
def test_initializer():
''' test with no initializer, inconsistent initializer '''
net = _get_test_network()
train_data, _ = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
loss = gluon.loss.L2Loss()
acc = mx.gluon.metric.Accuracy()
# no initializer
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
context=ctx)
est.fit(train_data=train_data,
epochs=num_epochs)
# different initializer for net and estimator
net = _get_test_network()
net.initialize(mx.init.Xavier(), ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
# catch reinit warning
with warnings.catch_warnings(record=True) as w:
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
initializer=mx.init.MSRAPrelu(),
trainer=trainer,
context=ctx)
assert 'Network already fully initialized' in str(w[-1].message)
# net partially initialized, fine tuning use case
net = gluon.model_zoo.vision.resnet18_v1(pretrained=True, ctx=ctx)
net.output = gluon.nn.Dense(10) #last layer not initialized
est = Estimator(net, loss=loss, train_metrics=acc, context=ctx)
dataset = gluon.data.ArrayDataset(mx.nd.zeros((10, 3, 224, 224)), mx.nd.zeros((10, 10)))
train_data = gluon.data.DataLoader(dataset=dataset, batch_size=5)
est.fit(train_data=train_data,
epochs=num_epochs)
def test_trainer():
''' test with no trainer and invalid trainer '''
net = _get_test_network()
train_data, _ = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
loss = gluon.loss.L2Loss()
acc = mx.gluon.metric.Accuracy()
net.initialize(ctx=ctx)
# input no trainer
with warnings.catch_warnings(record=True) as w:
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
context=ctx)
assert 'No trainer specified' in str(w[-1].message)
est.fit(train_data=train_data,
epochs=num_epochs)
# input invalid trainer
trainer = 'sgd'
with pytest.raises(ValueError):
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
trainer=trainer,
context=ctx)
def test_metric():
''' test with no metric, list of metrics, invalid metric '''
net = _get_test_network()
train_data, _ = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
loss = gluon.loss.L2Loss()
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
# input no metric
est = Estimator(net=net,
loss=loss,
trainer=trainer,
context=ctx)
est.fit(train_data=train_data,
epochs=num_epochs)
# input list of metrics
metrics = [mx.gluon.metric.Accuracy(), mx.gluon.metric.Accuracy()]
est = Estimator(net=net,
loss=loss,
train_metrics=metrics,
trainer=trainer,
context=ctx)
est.fit(train_data=train_data,
epochs=num_epochs)
# input invalid metric
with pytest.raises(ValueError):
est = Estimator(net=net,
loss=loss,
train_metrics='acc',
trainer=trainer,
context=ctx)
# test default metric
loss = gluon.loss.SoftmaxCrossEntropyLoss()
est = Estimator(net=net,
loss=loss,
trainer=trainer,
context=ctx)
assert isinstance(est.train_metrics[0], mx.gluon.metric.Accuracy)
def test_loss():
''' test with invalid loss '''
net = _get_test_network()
ctx = mx.cpu()
acc = mx.gluon.metric.Accuracy()
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
# input invalid loss
with pytest.raises(ValueError):
est = Estimator(net=net,
loss='mse',
train_metrics=acc,
trainer=trainer,
context=ctx)
def test_context():
''' test with no context, list of context, invalid context '''
net = _get_test_network()
loss = gluon.loss.L2Loss()
metrics = mx.gluon.metric.Accuracy()
# input no context
est = Estimator(net=net,
loss=loss,
train_metrics=metrics)
# input list of context
gpus = mx.context.num_gpus()
ctx = [mx.gpu(i) for i in range(gpus)] if gpus > 0 else [mx.cpu()]
net = _get_test_network()
est = Estimator(net=net,
loss=loss,
train_metrics=metrics,
context=ctx)
# input invalid context
with pytest.raises(ValueError):
est = Estimator(net=net,
loss=loss,
train_metrics=metrics,
context='cpu')
with pytest.raises(AssertionError):
est = Estimator(net=net,
loss=loss,
train_metrics=metrics,
context=[mx.gpu(0), mx.gpu(100)])
def test_categorize_handlers():
class CustomHandler1(TrainBegin):
def train_begin(self):
print("custom train begin")
class CustomHandler2(EpochBegin, BatchBegin, TrainEnd):
def epoch_begin(self):
print("custom epoch begin")
def batch_begin(self):
print("custom batch begin")
def train_end(self):
print("custom train end")
class CustomHandler3(EpochBegin, BatchBegin, BatchEnd, TrainEnd):
def epoch_begin(self):
print("custom epoch begin")
def batch_begin(self):
print("custom batch begin")
def batch_end(self):
print("custom batch end")
def train_end(self):
print("custom train end")
net = nn.Sequential()
net.add(nn.Dense(10))
loss = gluon.loss.SoftmaxCrossEntropyLoss()
est = Estimator(net, loss=loss)
event_handlers = [CustomHandler1(), CustomHandler2(), CustomHandler3()]
train_begin, epoch_begin, batch_begin, \
batch_end, epoch_end, train_end = est._categorize_handlers(event_handlers)
assert len(train_begin) == 1
assert len(epoch_begin) == 2
assert len(batch_begin) == 2
assert len(batch_end) == 1
assert len(train_end) == 2
def test_default_handlers():
net = _get_test_network()
train_data, _ = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
train_acc = mx.gluon.metric.RMSE()
loss = gluon.loss.L2Loss()
est = Estimator(net=net,
loss=loss,
train_metrics=train_acc,
trainer=trainer,
context=ctx)
# no handlers (all default handlers), no warning
with warnings.catch_warnings(record=True) as w:
est.fit(train_data=train_data, epochs=num_epochs)
# handler with prepared loss and metrics
# use mix of default and user defined handlers
train_metrics = est.train_metrics
val_metrics = est.val_metrics
logging = LoggingHandler(metrics=train_metrics)
est.fit(train_data=train_data, epochs=num_epochs, event_handlers=[logging])
# handler with all user defined metrics
# use mix of default and user defined handlers
metric = MetricHandler(metrics=[train_acc])
logging = LoggingHandler(metrics=[train_acc])
est.fit(train_data=train_data, epochs=num_epochs, event_handlers=[metric, logging])
# handler with mixed metrics, some handler use metrics prepared by estimator
# some handler use metrics user prepared
logging = LoggingHandler(metrics=[mx.gluon.metric.RMSE("val acc")])
with pytest.raises(ValueError):
est.fit(train_data=train_data, epochs=num_epochs, event_handlers=[logging])
# test handler order
train_metrics = est.train_metrics
val_metrics = est.val_metrics
early_stopping = EarlyStoppingHandler(monitor=val_metrics[0])
handlers = est._prepare_default_handlers(val_data=None, event_handlers=[early_stopping])
assert len(handlers) == 5
assert isinstance(handlers[0], GradientUpdateHandler)
assert isinstance(handlers[1], MetricHandler)
assert isinstance(handlers[4], LoggingHandler)
def test_val_net():
''' test estimator with different training and validation networks '''
''' test weight sharing of sequential networks without namescope '''
net = _get_test_network()
val_net = _get_test_network(params=net.collect_params())
dataloader, dataiter = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
loss = gluon.loss.L2Loss()
val_loss = gluon.loss.L2Loss()
acc = mx.gluon.metric.Accuracy()
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
trainer=trainer,
context=ctx,
val_loss=val_loss,
val_net=val_net)
with pytest.raises(RuntimeError):
est.fit(train_data=dataloader,
val_data=dataloader,
epochs=num_epochs)
''' test weight sharing of sequential networks with namescope '''
net = _get_test_network_with_namescope()
val_net = _get_test_network_with_namescope(params=net.collect_params())
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
trainer=trainer,
context=ctx,
val_loss=val_loss,
val_net=val_net)
est.fit(train_data=dataloader,
val_data=dataloader,
epochs=num_epochs)
''' test partial weight sharing of two resnets '''
net = gluon.model_zoo.vision.resnet18_v1(pretrained=False, ctx=ctx)
net.output = gluon.nn.Dense(10)
val_net = gluon.model_zoo.vision.resnet18_v1(pretrained=False, ctx=ctx)
val_net.output = gluon.nn.Dense(10, params=net.output.collect_params())
dataset = gluon.data.ArrayDataset(mx.nd.zeros((10, 3, 224, 224)), mx.nd.zeros((10, 10)))
dataloader = gluon.data.DataLoader(dataset=dataset, batch_size=5)
net.initialize(ctx=ctx)
val_net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
est = Estimator(net=net,
loss=loss,
train_metrics=acc,
trainer=trainer,
context=ctx,
val_loss=val_loss,
val_net=val_net)
est.fit(train_data=dataloader,
val_data=dataloader,
epochs=num_epochs)
def test_val_handlers():
net = _get_test_network()
train_data, _ = _get_test_data()
val_data, _ = _get_test_data()
num_epochs = 1
ctx = mx.cpu()
net.initialize(ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})
train_acc = mx.gluon.metric.RMSE()
loss = gluon.loss.L2Loss()
est = Estimator(net=net,
loss=loss,
train_metrics=train_acc,
trainer=trainer,
context=ctx)
with warnings.catch_warnings(record=True) as w:
est.fit(train_data=train_data, epochs=num_epochs)
est.evaluate(val_data=val_data)
logging = LoggingHandler(log_interval=1, metrics=est.val_metrics)
est.evaluate(val_data=val_data, event_handlers=[logging])
|
|
#!/usr/bin/env python2
"""Script to do basic sanity checking for target_link_libraries() commands in
CMakeLists.txt files.
Scans C++ sources specified in add_library() commands for includes that look
like they are in the Quickstep source tree, then makes sure that the
corresponding libraries appear in the target_link_libraries() command for the
library.
TODO List / Known Issues & Limitations:
- Script skips over targets that are built conditionally (i.e. that have
multiple add_library() commands) and just prints a warning to the user.
- Script only validates libraries, not executables.
- Script only checks quickstep includes and libraries, so it will not
detect missing third party libraries.
"""
# Copyright 2011-2015 Quickstep Technologies LLC.
# Copyright 2015 Pivotal Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
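# Illustrative invocation (the script filename here is hypothetical; run from
# the quickstep source root):
#   python2 validate_cmakelists.py query_optimizer/CMakeLists.txt
# With no arguments the script walks the current directory tree and checks
# every CMakeLists.txt it finds (see main() below).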
import os
import sys
# Don't scan these directories for quickstep modules.
EXCLUDED_TOP_LEVEL_DIRS = ["build", "third_party"]
# Explicitly ignored dependencies (special headers with no other quickstep
# dependencies).
IGNORED_DEPENDENCIES = frozenset(
["quickstep_threading_WinThreadsAPI",
"quickstep_utility_textbasedtest_TextBasedTest",
"quickstep_utility_textbasedtest_TextBasedTestDriver",
"quickstep_storage_bitweaving_BitWeavingHIndexSubBlock",
"quickstep_storage_bitweaving_BitWeavingIndexSubBlock",
"quickstep_storage_bitweaving_BitWeavingVIndexSubBlock"])
# States when scanning a CMakeLists.txt file.
CMAKE_SCANNING_NONE = 0
CMAKE_SCANNING_LIBRARY = 1
CMAKE_SCANNING_TARGET_LINK_LIBRARIES = 2
CMAKE_SCANNING_IGNORE = 3
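# A CMakeLists.txt file can exclude a region from validation by wrapping it in
# comments containing CMAKE_VALIDATE_IGNORE_BEGIN and CMAKE_VALIDATE_IGNORE_END;
# these are handled by the CMAKE_SCANNING_IGNORE state in
# process_cmakelists_file() below.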
def convert_path_to_targetname(include_path):
"""Convert an included header file's path to a quickstep library target in
cmake.
Args:
include_path (str): A header file path taken from a C++ include
statement.
Returns:
str: The target name in CMake that corresponds to the specified header.
"""
path_components = include_path.split("/")
for idx in range(len(path_components) - 1):
path_components[idx] = path_components[idx].replace("_", "")
if path_components[-1].endswith("_gen.hpp"):
# Generated header (e.g. parser or lexer).
path_components[-1] = path_components[-1][:-8]
elif path_components[-1].endswith(".hpp"):
# Regular header.
path_components[-1] = path_components[-1][:-4]
elif path_components[-1].endswith(".pb.h"):
# Generated protobuf header.
path_components[-1] = path_components[-1][:-5] + "_proto"
return "quickstep_" + "_".join(path_components)
def convert_proto_path_to_targetname(import_path):
"""Convert an imported proto's path to a quickstep library target in CMake.
Args:
import_path (str): A proto definition file path taken from a protobuf
import statement.
Returns:
str: The target name in CMake that corresponds to the specified proto
definition.
"""
path_components = import_path.split("/")
for idx in range(len(path_components) - 1):
path_components[idx] = path_components[idx].replace("_", "")
if path_components[-1].endswith(".proto"):
path_components[-1] = path_components[-1][:-6] + "_proto"
return "quickstep_" + "_".join(path_components)
def get_module_targetname_for_cmakelists(cmakelists_filename):
"""Determine what the name for the all-in-one module target should be based
on the CMakeLists.txt filename with path.
Args:
cmakelists_filename (str): CMakeLists.txt filename with path from
quickstep root.
Returns:
str: The target name in CMake that corresponds to the special
all-in-one library for the module described by the CMakeLists.txt
file.
"""
components = []
(head, tail) = os.path.split(cmakelists_filename)
while head != "":
(head, tail) = os.path.split(head)
if tail != ".":
components.append(tail.replace("_", ""))
components.append("quickstep")
components.reverse()
return "_".join(components)
def get_dependency_set_from_cpp_src(src_filename, qs_module_dirs):
"""Read the C++ source file at 'src_filename' and return a set of all
quickstep libraries it includes headers for.
Args:
src_filename (str): A path to a C++ source file (may be header or
implementation).
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules
Returns:
Set[str]: A set of CMake target names for the quickstep library targets
that the C++ file includes.
"""
dependency_set = set()
with open(src_filename, "r") as src_file:
for line in src_file:
if line.startswith("#include \""):
include_filename = line[len("#include \""):]
include_filename = (
include_filename[:include_filename.find("\"")])
# Skip over CMake-generated config headers and -inl companion
# headers.
if not (include_filename.endswith("Config.h")
or include_filename.endswith("-inl.hpp")):
for module_dir in qs_module_dirs:
if include_filename.startswith(module_dir):
dependency_set.add(
convert_path_to_targetname(include_filename))
break
return dependency_set
def get_dependency_set_from_proto_src(src_filename, qs_module_dirs):
"""Read the protobuf definition file at 'src_filename' and return a set of
all other Quickstep proto libraries it imports.
Args:
src_filename (str): A path to a proto definition file.
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules
Returns:
Set[str]: A set of CMake target names for the quickstep library targets
that the proto file imports.
"""
dependency_set = set()
with open(src_filename, "r") as src_file:
for line in src_file:
if line.startswith("import \""):
import_filename = line[len("import \""):]
import_filename = import_filename[:import_filename.find("\"")]
for module_dir in qs_module_dirs:
if import_filename.startswith(module_dir):
dependency_set.add(
convert_proto_path_to_targetname(import_filename))
break
return dependency_set
def process_add_library(qs_module_dirs,
directory,
add_library_args,
deps_from_includes,
skipped_targets,
generated_targets):
"""Process a CMake add_library() command while scanning a CMakeLists.txt
file.
Args:
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules
directory (str): The directory that the CMakeLists.txt file we are
currently scanning resides in.
add_library_args (str): The arguments to an add_library() command in
CMakeLists.txt
deps_from_includes (Map[str, Set[str]]): A map from a CMake target name
to the set of other CMake targets it depends on, deduced based on
what headers the C++/proto sources for the target include. A new
entry will be added to this map for the target specified by the
add_library() command.
skipped_targets (Set[str]): A set of CMake target names that have been
skipped for dependency checking because multiple add_library()
commands specified the same target name. This probably means that
the target in question is built differently depending on some
configuration options or platform checks.
generated_targets (Set[str]): A set of CMake target names that appear
to be built from dynamically-generated source code that we can't
scan. Note, however, that we can and do scan proto definitions and
flex/bison sources for dependencies. An entry will be added to this
set if the given add_library() command references unscannable
generated sources.
"""
components = add_library_args.split()
if components[0].startswith("quickstep"):
if components[0] in deps_from_includes:
skipped_targets.add(components[0])
deps_from_includes[components[0]] = set()
return
deps = set()
for src_filename in components[1:]:
if src_filename.startswith("${"):
if (src_filename.endswith("proto_srcs}")
or src_filename.endswith("proto_hdrs}")):
# Scan protobuf definition instead of C++ source.
#
# src_filename has the form module_File_proto_srcs, so we
# split it by '_' and get the third-from-last part (i.e.
# the base filename without extension).
src_filename = src_filename.split("_")[-3] + ".proto"
full_src_filename = os.path.join(directory, src_filename)
deps.update(
get_dependency_set_from_proto_src(full_src_filename,
qs_module_dirs))
continue
elif src_filename.startswith("${BISON_"):
# Scan Bison parser source.
src_filename = (
src_filename[len("${BISON_"):-len("_OUTPUTS}")]
+ ".ypp")
elif src_filename.startswith("${FLEX_"):
# Scan Flex lexer source.
src_filename = (
src_filename[len("${FLEX_"):-len("_OUTPUTS}")]
+ ".lpp")
else:
generated_targets.add(components[0])
return
elif src_filename.startswith("\"${CMAKE_CURRENT_SOURCE_DIR}/"):
src_filename = src_filename[
len("\"${CMAKE_CURRENT_SOURCE_DIR}/"):-1]
full_src_filename = os.path.join(directory, src_filename)
deps.update(get_dependency_set_from_cpp_src(full_src_filename,
qs_module_dirs))
deps_from_includes[components[0]] = deps
def process_target_link_libraries(target_link_libraries_args,
deps_in_cmake):
"""Process a CMake target_link_libraries() while scanning a CMakeLists.txt
file.
Args:
target_link_libraries_args (str): The arguments to a
target_link_libraries() command in CMakeLists.txt
deps_in_cmake (Map[str, Set[str]]): A map of CMake target names to
their sets of dependencies (also CMake target names) specified by
target_link_libraries() commands. If the target being processed
already has an entry in the map, its set will be expanded with any
additional dependencies, otherwise a new entry will be created with
all the dependencies from the current target_link_libraries()
command. This way, if multiple target_link_libraries() commands are
processed for the same target, we will build up the union of all
dependencies for it (just like CMake does).
"""
components = target_link_libraries_args.split()
if components[0].startswith("quickstep"):
deps = set()
# Intentionally count the first part for self-includes
for component in components:
if component.startswith("quickstep"):
deps.add(component)
if components[0] in deps_in_cmake:
deps_in_cmake[components[0]].update(deps)
else:
deps_in_cmake[components[0]] = deps
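# Illustrative example (hypothetical targets): after processing
#   target_link_libraries(quickstep_foo quickstep_bar glog)
#   target_link_libraries(quickstep_foo quickstep_baz)
# deps_in_cmake["quickstep_foo"] holds
#   {"quickstep_foo", "quickstep_bar", "quickstep_baz"};
# non-quickstep libraries such as glog are ignored, and the target itself is
# intentionally counted to account for self-includes.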
def process_cmakelists_file(cmakelists_filename, qs_module_dirs):
"""Scan a CMakeLists.txt file and report any mistakes (missing or
superfluous dependencies in target_link_libraries() commands).
This function will deduce what other libraries a given library target
should depend on based on what headers are included in its source code. It
will then collect the set of link dependencies actually specified in
target_link_libraries() commands, and will print warnings about libraries
that appear in one set but not the other.
Args:
cmakelists_filename (str): The path to a CMakeLists.txt file to scan
and validate.
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules.
Returns:
Tuple[Set[str], Set[str], Set[str]]: First element is the set of
targets that failed validation because they had missing and/or
superfluous dependencies. Second element is the set of targets
that were skipped over because they had multiple add_library()
commands (probably because they are built differently depending on
configuration options or platform checks). Third element is the
set of targets that were skipped because they appear to be built
from dynamically-generated source code (although proto definitions
and flex/bison sources are detected and scanned for dependencies).
"""
directory = os.path.dirname(cmakelists_filename)
module_targetname = get_module_targetname_for_cmakelists(
cmakelists_filename)
deps_from_includes = {}
deps_in_cmake = {}
validation_failed_targets = set()
skipped_targets = set()
generated_targets = set()
scan_state = CMAKE_SCANNING_NONE
previous_state = CMAKE_SCANNING_NONE
stitched_string = ""
with open(cmakelists_filename, "r") as cmakelists_file:
for line in cmakelists_file:
if ("CMAKE_VALIDATE_IGNORE_BEGIN" in line and
scan_state != CMAKE_SCANNING_IGNORE):
previous_state = scan_state
scan_state = CMAKE_SCANNING_IGNORE
continue
if scan_state == CMAKE_SCANNING_IGNORE:
if "CMAKE_VALIDATE_IGNORE_END" in line:
scan_state = previous_state
elif "CMAKE_VALIDATE_IGNORE_BEGIN" in line:
print "Nested IGNORE_BEGIN directives found in: "\
+ cmakelists_filename + ", exiting"
exit(-1)
else:
continue
elif scan_state == CMAKE_SCANNING_NONE:
add_library_pos = line.find("add_library(")
if add_library_pos != -1:
scan_state = CMAKE_SCANNING_LIBRARY
stitched_string = (
line[add_library_pos + len("add_library("):])
closing_paren_pos = stitched_string.find(")")
if closing_paren_pos != -1:
stitched_string = stitched_string[:closing_paren_pos]
process_add_library(qs_module_dirs,
directory,
stitched_string,
deps_from_includes,
skipped_targets,
generated_targets)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
else:
target_link_libraries_pos = line.find(
"target_link_libraries(")
if target_link_libraries_pos != -1:
scan_state = CMAKE_SCANNING_TARGET_LINK_LIBRARIES
stitched_string = (
line[target_link_libraries_pos
+ len("target_link_libraries("):])
closing_paren_pos = stitched_string.find(")")
if closing_paren_pos != -1:
stitched_string = (
stitched_string[:closing_paren_pos])
process_target_link_libraries(stitched_string,
deps_in_cmake)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
elif scan_state == CMAKE_SCANNING_LIBRARY:
closing_paren_pos = line.find(")")
if closing_paren_pos == -1:
stitched_string += line
else:
stitched_string += line[:closing_paren_pos]
process_add_library(qs_module_dirs,
directory,
stitched_string,
deps_from_includes,
skipped_targets,
generated_targets)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
elif scan_state == CMAKE_SCANNING_TARGET_LINK_LIBRARIES:
closing_paren_pos = line.find(")")
if closing_paren_pos == -1:
stitched_string += line
else:
stitched_string += line[:closing_paren_pos]
process_target_link_libraries(stitched_string,
deps_in_cmake)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
# After scanning, report any missing dependencies.
for target, include_deps in deps_from_includes.iteritems():
if target in skipped_targets:
pass
elif len(include_deps) != 0:
if target not in deps_in_cmake:
if not (target in include_deps and len(include_deps) == 1):
validation_failed_targets.add(target)
print "Missing target_link_libraries() for " + target + ":"
for dep in sorted(include_deps):
print "\t" + dep
else:
missing_deps = (include_deps
- deps_in_cmake[target]
- IGNORED_DEPENDENCIES)
if len(missing_deps) != 0:
validation_failed_targets.add(target)
print "Missing target_link_libraries() for " + target + ":"
for dep in sorted(missing_deps):
print "\t" + dep
elif target == module_targetname:
# Special case hack for module all-in-one library
missing_deps = (frozenset(deps_from_includes.keys())
- deps_in_cmake[target])
# Filter out test-only libraries.
true_missing_deps = set()
for dep in missing_deps:
if not dep.startswith(module_targetname + "_tests"):
true_missing_deps.add(dep)
if len(true_missing_deps) != 0:
validation_failed_targets.add(target)
print "Missing target_link_libraries() for " + target + ":"
for dep in sorted(true_missing_deps):
print "\t" + dep
# Also report possibly superfluous extra dependencies.
for target, cmake_deps in deps_in_cmake.iteritems():
if (target not in skipped_targets) and (target in deps_from_includes):
extra_deps = cmake_deps - deps_from_includes[target]
if target in extra_deps:
extra_deps.remove(target)
if len(extra_deps) != 0 and target != module_targetname:
validation_failed_targets.add(target)
print ("Possibly superfluous target_link_libraries() for "
+ target + ":")
for dep in sorted(extra_deps):
print "\t" + dep
return (validation_failed_targets, skipped_targets, generated_targets)
def main(cmakelists_to_process):
"""Main function for script which scans and analyzes CMakeLists.txt files
and prints warnings about missing or superfluous dependencies, and about
targets that could not be automatically scanned and should be manually
checked.
Args:
cmakelists_to_process (List[str]): A list of relative paths of
CMakeLists.txt files to scan and report on. If empty, this function
will instead recursively walk the current working directory and
scan every CMakeLists.txt file that it finds.
Returns:
int: The total number of targets that failed validation because of
missing or superfluous dependencies.
"""
if not os.getcwd().endswith("quickstep"):
print ("WARNING: you don't appear to be running in the root quickstep "
"source directory. Don't blame me if something goes wrong.")
qs_module_dirs = []
for filename in os.listdir("."):
if (os.path.isdir(filename)
and not filename.startswith(".")
and filename not in EXCLUDED_TOP_LEVEL_DIRS):
qs_module_dirs.append(filename)
if len(cmakelists_to_process) == 0:
for (dirpath, dirnames, filenames) in os.walk('.'):
skip = False
for excluded_dir in EXCLUDED_TOP_LEVEL_DIRS:
if dirpath.startswith(excluded_dir):
skip = True
break
if not skip:
if "CMakeLists.txt" in filenames:
cmakelists_to_process.append(
os.path.join(dirpath, "CMakeLists.txt"))
global_validation_failed_targets = set()
global_skipped_targets = set()
global_generated_targets = set()
for cmakelists_filename in cmakelists_to_process:
(local_validation_failed_targets,
local_skipped_targets,
local_generated_targets) = (
process_cmakelists_file(cmakelists_filename, qs_module_dirs))
global_validation_failed_targets.update(
local_validation_failed_targets)
global_skipped_targets.update(local_skipped_targets)
global_generated_targets.update(local_generated_targets)
if len(global_skipped_targets) != 0:
print ("WARNING: The following targets had multiple add_library() "
+ "commands and were NOT checked by this script (they should "
+ "be manually checked):")
for target in sorted(global_skipped_targets):
print "\t" + target
if len(global_generated_targets) != 0:
print ("INFO: The add_library() commands for the following targets "
+ "appear to reference generated sources, so they were not "
+ "checked):")
for target in sorted(global_generated_targets):
print "\t" + target
return len(global_validation_failed_targets)
if __name__ == "__main__":
if main(sys.argv[1:]) > 0:
sys.exit(1)
else:
sys.exit(0)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A factory that creates UncommittedBundles."""
from __future__ import absolute_import
from apache_beam import pvalue
from apache_beam.utils.windowed_value import WindowedValue
class BundleFactory(object):
"""BundleFactory creates output bundles to be used by transform evaluators.
Args:
stacked: whether or not to stack the WindowedValues within the bundle
in case consecutive ones share the same timestamp and windows.
DirectRunnerOptions.direct_runner_use_stacked_bundle controls this option.
"""
def __init__(self, stacked):
self._stacked = stacked
def create_bundle(self, output_pcollection):
return Bundle(output_pcollection, self._stacked)
def create_empty_committed_bundle(self, output_pcollection):
bundle = self.create_bundle(output_pcollection)
bundle.commit(None)
return bundle
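# Illustrative usage of the factory (names are hypothetical):
#   factory = BundleFactory(stacked=True)
#   bundle = factory.create_bundle(some_pcollection)
#   bundle.add(some_windowed_value)
#   bundle.commit(None)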
# a bundle represents a unit of work that will be processed by a transform.
class Bundle(object):
"""Part of a PCollection with output elements.
Part of a PCollection. Elements are output to a bundle, which will cause them
to be processed at a later point by the PTransforms that consume the
PCollection this bundle is a part of. It starts as an uncommitted bundle and
can have elements added to it. It needs to be committed to make it immutable
before passing it to a downstream ptransform.
The stored elements are WindowedValues, which carry timestamp and window
information.
Bundle internally optimizes storage by stacking elements with the same
timestamp and windows into StackedWindowedValues, and restores them to plain
WindowedValues in the iterable returned by get_elements_iterable().
When this optimization is not desired, it can be avoided by an option when
creating bundles, like:::
b = Bundle(stacked=False)
"""
class StackedWindowedValues(object):
"""A stack of WindowedValues with the same timestamp and windows.
It must be initialized from a single WindowedValue.
Example:::
s = StackedWindowedValues(windowed_value)
if (another_windowed_value.timestamp == s.timestamp and
another_windowed_value.windows == s.windows):
s.add_value(another_windowed_value.value)
windowed_values = [wv for wv in s.windowed_values()]
# now windowed_values equals to [windowed_value, another_windowed_value]
"""
def __init__(self, initial_windowed_value):
self._initial_windowed_value = initial_windowed_value
self._appended_values = []
@property
def timestamp(self):
return self._initial_windowed_value.timestamp
@property
def windows(self):
return self._initial_windowed_value.windows
def add_value(self, value):
self._appended_values.append(value)
def windowed_values(self):
# yield first windowed_value as is, then iterate through
# _appended_values to yield WindowedValue on the fly.
yield self._initial_windowed_value
for v in self._appended_values:
yield WindowedValue(v, self._initial_windowed_value.timestamp,
self._initial_windowed_value.windows)
def __init__(self, pcollection, stacked=True):
assert (isinstance(pcollection, pvalue.PCollection)
or isinstance(pcollection, pvalue.PCollectionView))
self._pcollection = pcollection
self._elements = []
self._stacked = stacked
self._committed = False
self._tag = None # optional tag information for this bundle
def get_elements_iterable(self, make_copy=False):
"""Returns iterable elements.
Args:
make_copy: whether to force returning a copy instead of the lazily
yielded iterable.
Returns:
The unstacked elements: an iterable if the bundle is committed and
make_copy is not True, otherwise a list of copied WindowedValues.
"""
if not self._stacked:
if self._committed and not make_copy:
return self._elements
else:
return list(self._elements)
def iterable_stacked_or_elements(elements):
for e in elements:
if isinstance(e, Bundle.StackedWindowedValues):
for w in e.windowed_values():
yield w
else:
yield e
if self._committed and not make_copy:
return iterable_stacked_or_elements(self._elements)
else:
# returns a copy.
return [e for e in iterable_stacked_or_elements(self._elements)]
def has_elements(self):
return len(self._elements) > 0
@property
def tag(self):
return self._tag
@tag.setter
def tag(self, value):
assert not self._tag
self._tag = value
@property
def pcollection(self):
"""PCollection that the elements of this UncommittedBundle belong to."""
return self._pcollection
def add(self, element):
"""Outputs an element to this bundle.
Args:
element: WindowedValue
"""
assert not self._committed
if not self._stacked:
self._elements.append(element)
return
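# Stacked mode: if the incoming element shares its timestamp and windows with
# the last stored element, fold it into a StackedWindowedValues entry instead
# of appending a separate WindowedValue.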
if (len(self._elements) > 0 and
(isinstance(self._elements[-1], WindowedValue) or
isinstance(self._elements[-1], Bundle.StackedWindowedValues)) and
self._elements[-1].timestamp == element.timestamp and
self._elements[-1].windows == element.windows):
if isinstance(self._elements[-1], WindowedValue):
self._elements[-1] = Bundle.StackedWindowedValues(self._elements[-1])
self._elements[-1].add_value(element.value)
else:
self._elements.append(element)
def output(self, element):
self.add(element)
def commit(self, synchronized_processing_time):
"""Commits this bundle.
Uncommitted bundle will become committed (immutable) after this call.
Args:
synchronized_processing_time: the synchronized processing time at which
this bundle was committed
"""
assert not self._committed
self._committed = True
self._elements = tuple(self._elements)
self._synchronized_processing_time = synchronized_processing_time
|
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import cos, sin, pi
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_, suppress_warnings)
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
cumtrapz, quad, simps, fixed_quad)
from scipy.integrate.quadrature import AccuracyWarning
class TestFixedQuad(object):
def test_scalar(self):
n = 4
func = lambda x: x**(2*n - 1)
expected = 1/(2*n)
got, _ = fixed_quad(func, 0, 1, n=n)
# quadrature exact for this input
assert_allclose(got, expected, rtol=1e-12)
def test_vector(self):
n = 4
p = np.arange(1, 2*n)
func = lambda x: x**p[:,None]
expected = 1/(p + 1)
got, _ = fixed_quad(func, 0, 1, n=n)
assert_allclose(got, expected, rtol=1e-12)
class TestQuadrature(object):
def quad(self, x, a, b, args):
raise NotImplementedError
def test_quadrature(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_quadrature_rtol(self):
def myfunc(x, n, z): # Bessel function integrand
return 1e90 * cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_quadrature_miniter(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
table_val = 0.30614353532540296487
for miniter in [5, 52]:
val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
assert_almost_equal(val, table_val, decimal=7)
assert_(err < 1.0)
def test_quadrature_single_args(self):
def myfunc(x, n):
return 1e90 * cos(n*x-1.8*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romberg(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romberg_rtol(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return 1e19*cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
table_val = 1e19*0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romb(self):
assert_equal(romb(np.arange(17)), 128)
def test_romb_gh_3731(self):
# Check that romb makes maximal use of data points
x = np.arange(2**4+1)
y = np.cos(0.2*x)
val = romb(y)
val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
assert_allclose(val, val2, rtol=1e-8, atol=0)
# should be equal to romb with 2**k+1 samples
with suppress_warnings() as sup:
sup.filter(AccuracyWarning, "divmax .4. exceeded")
val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
assert_allclose(val, val3, rtol=1e-12, atol=0)
def test_non_dtype(self):
# Check that we work fine with functions returning float
import math
valmath = romberg(math.sin, 0, 1)
expected_val = 0.45969769413185085
assert_almost_equal(valmath, expected_val, decimal=7)
def test_newton_cotes(self):
"""Test the first few degrees, for evenly spaced points."""
n = 1
wts, errcoff = newton_cotes(n, 1)
assert_equal(wts, n*np.array([0.5, 0.5]))
assert_almost_equal(errcoff, -n**3/12.0)
n = 2
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
assert_almost_equal(errcoff, -n**5/2880.0)
n = 3
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
assert_almost_equal(errcoff, -n**5/6480.0)
n = 4
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
assert_almost_equal(errcoff, -n**7/1935360.0)
def test_newton_cotes2(self):
"""Test newton_cotes with points that are not evenly spaced."""
x = np.array([0.0, 1.5, 2.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 8.0/3
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
x = np.array([0.0, 1.4, 2.1, 3.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 9.0
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
def test_simps(self):
y = np.arange(17)
assert_equal(simps(y), 128)
assert_equal(simps(y, dx=0.5), 64)
assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32)
y = np.arange(4)
x = 2**y
assert_equal(simps(y, x=x, even='avg'), 13.875)
assert_equal(simps(y, x=x, even='first'), 13.75)
assert_equal(simps(y, x=x, even='last'), 14)
class TestCumtrapz(object):
def test_1d(self):
x = np.linspace(-2, 2, num=5)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = [0., -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, x, initial=None)
assert_allclose(y_int, y_expected[1:])
def test_y_nd_x_nd(self):
x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = np.array([[[0., 0.5, 2., 4.5],
[0., 4.5, 10., 16.5]],
[[0., 8.5, 18., 28.5],
[0., 12.5, 26., 40.5]],
[[0., 16.5, 34., 52.5],
[0., 20.5, 42., 64.5]]])
assert_allclose(y_int, y_expected)
# Try with all axes
shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
for axis, shape in zip([0, 1, 2], shapes):
y_int = cumtrapz(y, x, initial=3.45, axis=axis)
assert_equal(y_int.shape, (3, 2, 4))
y_int = cumtrapz(y, x, initial=None, axis=axis)
assert_equal(y_int.shape, shape)
def test_y_nd_x_1d(self):
y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
x = np.arange(4)**2
# Try with all axes
ys_expected = (
np.array([[[4., 5., 6., 7.],
[8., 9., 10., 11.]],
[[40., 44., 48., 52.],
[56., 60., 64., 68.]]]),
np.array([[[2., 3., 4., 5.]],
[[10., 11., 12., 13.]],
[[18., 19., 20., 21.]]]),
np.array([[[0.5, 5., 17.5],
[4.5, 21., 53.5]],
[[8.5, 37., 89.5],
[12.5, 53., 125.5]],
[[16.5, 69., 161.5],
[20.5, 85., 197.5]]]))
for axis, y_expected in zip([0, 1, 2], ys_expected):
y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None)
assert_allclose(y_int, y_expected)
def test_x_none(self):
y = np.linspace(-2, 2, num=5)
y_int = cumtrapz(y)
y_expected = [-1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, initial=1.23)
y_expected = [1.23, -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3)
y_expected = [-4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3, initial=1.23)
y_expected = [1.23, -4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
|
|
"""Just an empty models file to let the testrunner recognize this as app."""
from decimal import Decimal
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import get_language, ugettext_lazy as _
from hvad.descriptors import LanguageCodeAttribute, TranslatedAttribute
from hvad.models import TranslatableModel, TranslatedFields
# When using the TranslatableModel class, it still uses the default Django
# related manager for some reason instead of the translation-aware one. It
# therefore returns only the untranslated/shared model instance. When you
# then call a custom descriptor, the descriptor itself will query for the
# translation. Unfortunately the querying method used is fairly simple, and if
# there is no object for this language, we get no results. With this update,
# instead of showing nothing we retrieve the English version as a fallback,
# since it is always the first to be created (see the illustrative sketch at
# the end of this module).
class BetterTranslatedAttribute(TranslatedAttribute):
"""
Customized TranslatedAttribute that fetches the English variant of the
attribute if the requested translation does not exist.
Like the base descriptor, it proxies attributes from the shared instance to
the translated instance.
"""
def translation(self, instance): # pragma: nocover
def get_translation(instance, language_code=None):
opts = instance._meta
if not language_code:
language_code = get_language()
accessor = getattr(instance, opts.translations_accessor)
try:
return accessor.get(language_code=language_code)
except ObjectDoesNotExist:
# doing a fallback in case the requested language doesn't exist
return accessor.get(language_code='en')
cached = getattr(instance, self.opts.translations_cache, None)
if not cached:
cached = get_translation(instance)
setattr(instance, self.opts.translations_cache, cached)
return cached
class BetterTranslatedAttributeMixin(object):
@classmethod
def contribute_translations(cls, rel):
"""
Contribute translations options to the inner Meta class and set the
descriptors.
This gets called from TranslatableModelBase.__new__
"""
opts = cls._meta
opts.translations_accessor = rel.get_accessor_name()
opts.translations_model = rel.model
opts.translations_cache = '%s_cache' % rel.get_accessor_name()
trans_opts = opts.translations_model._meta
# Set descriptors
ignore_fields = [
'pk',
'master',
opts.translations_model._meta.pk.name,
]
for field in trans_opts.fields:
if field.name in ignore_fields:
continue
if field.name == 'language_code':
attr = LanguageCodeAttribute(opts)
else:
attr = BetterTranslatedAttribute(opts, field.name)
setattr(cls, field.name, attr)
class DatedValue(models.Model):
"""
The value that is attached to an object for a given date.
:_ctype: Will always be the same ctype as the related type's ctype.
:date: The optional date on which this value applies.
:object: The related object.
:object_id: The id of the object that this value is for.
:type: The DatedValueType this value belongs to.
:value: The decimal value that is attached.
"""
_ctype = models.ForeignKey(
ContentType,
verbose_name=_('Content Type'),
)
date = models.DateField(
verbose_name=_('Date'),
blank=True, null=True,
)
object = generic.GenericForeignKey(
ct_field='_ctype',
fk_field='object_id',
)
object_id = models.PositiveIntegerField(
verbose_name=_('Object id'),
)
type = models.ForeignKey(
'dated_values.DatedValueType',
verbose_name=_('Type'),
)
value = models.DecimalField(
verbose_name=_('Value'),
max_digits=24,
decimal_places=8,
)
def __unicode__(self):
return '[{0}] {1} ({2}): {3}'.format(
self.date, self.object, self.type, self.normal_value)
def clean(self):
if self.value:
split_value = self.value.to_eng_string().split('.')
if len(split_value) > 1 and len(
split_value[1]) > self.type.decimal_places:
raise ValidationError(_(
'The value can only have {0} decimal places.').format(
self.type.decimal_places))
@property
def normal_value(self):
"""
Returns the normalized value according to the settings in the type.
"""
if self.value:
return getattr(self, 'value').quantize(Decimal(
'0' * 24 + '.' + '0' * self.type.decimal_places))
@normal_value.setter
def normal_value(self, value):
setattr(self, 'value', value)
def save(self, *args, **kwargs):
self._ctype = self.type.ctype
super(DatedValue, self).save(*args, **kwargs)
class Meta:
ordering = ['date', ]
class DatedValueType(BetterTranslatedAttributeMixin, TranslatableModel):
"""
The type of a dated value and what model type it belongs to.
:ctype: The ctype of the related model.
:decimal_places: If you want to limit the decimal places that the
``normal_value`` attribute outputs, you can specify an alternative here.
Defaults to 2.
:editable: True, if the value type is editable by an admin. If False, it is
only displayed.
:hidden: True, if the type should not be displayed on the management page
at all.
:slug: A unique identifier.
translated:
:name: A displayable name for the value type.
"""
ctype = models.ForeignKey(
ContentType,
verbose_name=_('Content Type'),
)
decimal_places = models.PositiveIntegerField(
verbose_name=_('Decimal places'),
default=2,
)
slug = models.SlugField(
verbose_name=_('Slug'),
max_length=64,
unique=True,
)
translations = TranslatedFields(
name=models.CharField(
verbose_name=_('Name'),
max_length=256,
)
)
editable = models.BooleanField(
verbose_name=_('Editable'),
default=True,
)
hidden = models.BooleanField(
verbose_name=_('Hidden'),
default=False,
)
def __unicode__(self):
return '{0} ({1})'.format(
self.safe_translation_getter('name', self.slug), self.ctype)
def clean(self):
if self.decimal_places > 8:
raise ValidationError(_(
'decimal_places cannot be bigger than 8.'))
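# Illustrative usage sketch (hypothetical data; assumes a configured database
# and the hvad manager API): a value type that is translated only into English
# still exposes its name when another language is active, thanks to the
# fallback in BetterTranslatedAttribute above.
def _example_language_fallback():
    from django.utils import translation
    ctype = ContentType.objects.get_for_model(DatedValue)
    value_type = DatedValueType.objects.language('en').create(
        ctype=ctype, slug='turnover', name='Turnover')
    # Re-fetch so that no translation is cached on the instance.
    value_type = DatedValueType.objects.get(pk=value_type.pk)
    with translation.override('de'):
        return value_type.name  # 'Turnover' via the English fallback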
|
|
from symbol.builder import add_anchor_to_arg
from symbol.builder import ResNetV1bFPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
from models.maskrcnn.builder import MaskFasterRcnn as Detector
from models.maskrcnn.builder import MaskFPNRpnHead as RpnHead
from models.maskrcnn.builder import MaskFasterRcnn4ConvHead as MaskHead
from models.maskrcnn.builder import BboxPostProcessor
from models.maskrcnn.process_output import process_output
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
loader_worker = 8
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="syncbn", ndev=len(KvstoreParam.gpus))
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
depth = 50
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
nnvm_proposal = True
nnvm_rpn_target = False
class anchor_generate:
scale = (8,)
ratio = (0.5, 1.0, 2.0)
stride = (4, 8, 16, 32, 64)
image_anchor = 256
max_side = 1400
class anchor_assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
image_anchor = 256
pos_fraction = 0.5
class head:
conv_channel = 256
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 2000 if is_train else 1000
post_nms_top_n = 2000 if is_train else 1000
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = False
image_roi = 512
fg_fraction = 0.25
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 81
class_agnostic = False
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 512
batch_image = General.batch_image
class regress_target:
class_agnostic = False
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class MaskParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
resolution = 28
dim_reduced = 256
num_fg_roi = int(RpnParam.subsample_proposal.image_roi * RpnParam.subsample_proposal.fg_fraction)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
class MaskRoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 14
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.02 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = None
class schedule:
mult = 2
begin_epoch = 0
end_epoch = 6 * mult
lr_iter = [60000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
iter = 500
class TestParam:
min_det_score = 0.05
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: process_output(x, y)
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam, MaskParam)
roi_extractor = RoiExtractor(RoiParam)
mask_roi_extractor = RoiExtractor(MaskRoiParam)
bbox_head = BboxHead(BboxParam)
mask_head = MaskHead(BboxParam, MaskParam, MaskRoiParam)
bbox_post_processer = BboxPostProcessor(TestParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, bbox_post_processer)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = True
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = []
def process_weight(sym, arg, aux):
for stride in RpnParam.anchor_generate.stride:
add_anchor_to_arg(
sym, arg, aux, RpnParam.anchor_generate.max_side,
stride, RpnParam.anchor_generate.scale,
RpnParam.anchor_generate.ratio)
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
max_len_gt_poly = 2500
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.stride = (4, 8, 16, 32, 64)
self.short = (200, 100, 50, 25, 13)
self.long = (334, 167, 84, 42, 21)
scales = (8,)
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage
from models.maskrcnn.input import PreprocessGtPoly, EncodeGtPoly, \
Resize2DImageBboxMask, Flip2DImageBboxMask, Pad2DImageBboxMask
from models.FPN.input import PyramidAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
PreprocessGtPoly(),
Resize2DImageBboxMask(ResizeParam),
Flip2DImageBboxMask(),
EncodeGtPoly(PadParam),
Pad2DImageBboxMask(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["im_info", "gt_bbox", "gt_poly"]
if not RpnParam.nnvm_rpn_target:
transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
import core.detection_metric as metric
from models.maskrcnn.metric import SigmoidCELossMetric
rpn_acc_metric = metric.AccWithIgnore(
"RpnAcc",
["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
rpn_l1_metric = metric.L1(
"RpnL1",
["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
# for bbox, the label is generated in network so it is an output
box_acc_metric = metric.AccWithIgnore(
"RcnnAcc",
["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
[]
)
box_l1_metric = metric.L1(
"RcnnL1",
["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
[]
)
mask_cls_metric = SigmoidCELossMetric(
"MaskCE",
["mask_loss_output"],
[]
)
metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric, mask_cls_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
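# Illustrative sketch (hypothetical driver, not the repository's real training
# entry point): how a script might unpack the tuple returned by get_config.
if __name__ == "__main__":
    (General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam,
     ModelParam, OptimizeParam, TestParam,
     transform, data_name, label_name, metric_list) = get_config(is_train=True)
    print("config %s: %d GPUs, base lr %.5f, %d epochs" % (
        General.name, len(KvstoreParam.gpus), OptimizeParam.optimizer.lr,
        OptimizeParam.schedule.end_epoch))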
|
|
from __future__ import absolute_import
from django.db.models.signals import post_save
from sentry import analytics
from sentry.adoption import manager
from sentry.models import FeatureAdoption, GroupTombstone, Organization
from sentry.plugins import IssueTrackingPlugin, IssueTrackingPlugin2
from sentry.plugins.bases.notify import NotificationPlugin
from sentry.receivers.rules import DEFAULT_RULE_LABEL, DEFAULT_RULE_DATA
from sentry.signals import (
advanced_search,
advanced_search_feature_gated,
alert_rule_created,
data_scrubber_enabled,
deploy_created,
event_processed,
first_event_received,
inbound_filter_toggled,
integration_added,
integration_issue_created,
integration_issue_linked,
issue_assigned,
issue_resolved,
issue_ignored,
issue_deleted,
member_joined,
ownership_rule_created,
plugin_enabled,
project_created,
release_created,
repo_linked,
save_search_created,
sso_enabled,
team_created,
user_feedback_received,
)
from sentry.utils.javascript import has_sourcemap
DEFAULT_TAGS = frozenset(
[
"level",
"logger",
"transaction",
"url",
"browser",
"sentry:user",
"os",
"server_name",
"device",
"os.name",
"browser.name",
"sentry:release",
"environment",
"device.family",
"site",
"version",
"interface_type",
"rake_task",
"runtime",
"runtime.name",
"type",
"php_version",
"app",
"app.device",
"locale",
"os_version",
"device_model",
"deviceModel",
"sentry_version",
]
)
# First Event
@first_event_received.connect(weak=False)
def record_first_event(project, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="first_event", complete=True
)
@event_processed.connect(weak=False)
def record_event_processed(project, event, **kwargs):
feature_slugs = []
platform = event.group.platform if event.group else event.platform
# Platform
if platform in manager.location_slugs("language"):
feature_slugs.append(platform)
# Release Tracking
if event.get_tag("sentry:release"):
feature_slugs.append("release_tracking")
# Environment Tracking
if event.get_tag("environment"):
feature_slugs.append("environment_tracking")
# User Tracking
user_context = event.data.get("user")
# We'd like them to tag with id or email.
# Certain SDKs automatically tag with ip address.
# Check to make sure more than the ip address is being sent.
# testing for this in test_no_user_tracking_for_ip_address_only
# list(d.keys()) pattern is to make this python3 safe
if user_context and list(user_context.keys()) != ["ip_address"]:
feature_slugs.append("user_tracking")
# Custom Tags
if set(tag[0] for tag in event.tags) - DEFAULT_TAGS:
feature_slugs.append("custom_tags")
# Sourcemaps
if has_sourcemap(event):
feature_slugs.append("source_maps")
# Breadcrumbs
if event.data.get("breadcrumbs"):
feature_slugs.append("breadcrumbs")
if not feature_slugs:
return
FeatureAdoption.objects.bulk_record(project.organization_id, feature_slugs)
@user_feedback_received.connect(weak=False)
def record_user_feedback(project, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="user_feedback", complete=True
)
@project_created.connect(weak=False)
def record_project_created(project, user, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="first_project", complete=True
)
@member_joined.connect(weak=False)
def record_member_joined(member, organization, **kwargs):
FeatureAdoption.objects.record(
organization_id=member.organization_id, feature_slug="invite_team", complete=True
)
analytics.record("organization.joined", user_id=member.user.id, organization_id=organization.id)
@issue_assigned.connect(weak=False)
def record_issue_assigned(project, group, user, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="assignment", complete=True
)
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = project.organization.get_default_owner().id
analytics.record(
"issue.assigned",
user_id=user_id,
default_user_id=default_user_id,
organization_id=project.organization_id,
group_id=group.id,
)
@issue_resolved.connect(weak=False)
def record_issue_resolved(organization_id, project, group, user, resolution_type, **kwargs):
""" There are three main types of ways to resolve issues
1) via a release (current release, next release, or other)
2) via commit (in the UI with the commit hash (marked as "in_commit")
or tagging the issue in a commit (marked as "with_commit"))
3) now
"""
if resolution_type in ("in_next_release", "in_release"):
FeatureAdoption.objects.record(
organization_id=organization_id, feature_slug="resolved_in_release", complete=True
)
if resolution_type == "with_commit":
FeatureAdoption.objects.record(
organization_id=organization_id, feature_slug="resolved_with_commit", complete=True
)
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = project.organization.get_default_owner().id
analytics.record(
"issue.resolved",
user_id=user_id,
default_user_id=default_user_id,
organization_id=organization_id,
group_id=group.id,
resolution_type=resolution_type,
)
@advanced_search.connect(weak=False)
def record_advanced_search(project, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="advanced_search", complete=True
)
@advanced_search_feature_gated.connect(weak=False)
def record_advanced_search_feature_gated(user, organization, **kwargs):
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = organization.get_default_owner().id
analytics.record(
"advanced_search.feature_gated",
user_id=user_id,
default_user_id=default_user_id,
organization_id=organization.id,
)
@save_search_created.connect(weak=False)
def record_save_search_created(project, user, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="saved_search", complete=True
)
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = project.organization.get_default_owner().id
analytics.record(
"search.saved",
user_id=user_id,
default_user_id=default_user_id,
project_id=project.id,
organization_id=project.organization_id,
)
@inbound_filter_toggled.connect(weak=False)
def record_inbound_filter_toggled(project, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="inbound_filters", complete=True
)
@alert_rule_created.connect(weak=False)
def record_alert_rule_created(user, project, rule, **kwargs):
if rule.label == DEFAULT_RULE_LABEL and rule.data == DEFAULT_RULE_DATA:
return
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="alert_rules", complete=True
)
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = project.organization.get_default_owner().id
analytics.record(
"alert.created",
user_id=user_id,
default_user_id=default_user_id,
organization_id=project.organization_id,
rule_id=rule.id,
actions=[a["id"] for a in rule.data.get("actions", [])],
)
@plugin_enabled.connect(weak=False)
def record_plugin_enabled(plugin, project, user, **kwargs):
if isinstance(plugin, (IssueTrackingPlugin, IssueTrackingPlugin2)):
FeatureAdoption.objects.record(
organization_id=project.organization_id,
feature_slug="issue_tracker_integration",
complete=True,
)
elif isinstance(plugin, NotificationPlugin):
FeatureAdoption.objects.record(
organization_id=project.organization_id,
feature_slug="notification_integration",
complete=True,
)
@sso_enabled.connect(weak=False)
def record_sso_enabled(organization, user, provider, **kwargs):
FeatureAdoption.objects.record(
organization_id=organization.id, feature_slug="sso", complete=True
)
analytics.record(
"sso.enabled", user_id=user.id, organization_id=organization.id, provider=provider
)
@data_scrubber_enabled.connect(weak=False)
def record_data_scrubber_enabled(organization, **kwargs):
FeatureAdoption.objects.record(
organization_id=organization.id, feature_slug="data_scrubbers", complete=True
)
def deleted_and_discarded_issue(instance, created, **kwargs):
if created:
FeatureAdoption.objects.record(
organization_id=instance.project.organization_id, feature_slug="delete_and_discard"
)
@repo_linked.connect(weak=False)
def record_repo_linked(repo, user, **kwargs):
FeatureAdoption.objects.record(
organization_id=repo.organization_id, feature_slug="repo_linked", complete=True
)
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = Organization.objects.get(id=repo.organization_id).get_default_owner().id
analytics.record(
"repo.linked",
user_id=user_id,
default_user_id=default_user_id,
organization_id=repo.organization_id,
repository_id=repo.id,
provider=repo.provider,
)
@release_created.connect(weak=False)
def record_release_created(release, **kwargs):
FeatureAdoption.objects.record(
organization_id=release.organization_id, feature_slug="release_created", complete=True
)
@deploy_created.connect(weak=False)
def record_deploy_created(deploy, **kwargs):
FeatureAdoption.objects.record(
organization_id=deploy.organization_id, feature_slug="deploy_created", complete=True
)
@ownership_rule_created.connect(weak=False)
def record_ownership_rule_created(project, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id,
feature_slug="ownership_rule_created",
complete=True,
)
@issue_ignored.connect(weak=False)
def record_issue_ignored(project, user, group_list, activity_data, **kwargs):
FeatureAdoption.objects.record(
organization_id=project.organization_id, feature_slug="issue_ignored", complete=True
)
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = project.organization.get_default_owner().id
for group in group_list:
analytics.record(
"issue.ignored",
user_id=user_id,
default_user_id=default_user_id,
organization_id=project.organization_id,
group_id=group.id,
ignore_duration=activity_data.get("ignoreDuration"),
ignore_count=activity_data.get("ignoreCount"),
ignore_window=activity_data.get("ignoreWindow"),
ignore_user_count=activity_data.get("ignoreUserCount"),
ignore_user_window=activity_data.get("ignoreUserWindow"),
)
@team_created.connect(weak=False)
def record_team_created(organization, user, team, **kwargs):
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = organization.get_default_owner().id
analytics.record(
"team.created",
user_id=user_id,
default_user_id=default_user_id,
organization_id=organization.id,
team_id=team.id,
)
@integration_added.connect(weak=False)
def record_integration_added(integration, organization, user, **kwargs):
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = organization.get_default_owner().id
analytics.record(
"integration.added",
user_id=user_id,
default_user_id=default_user_id,
organization_id=organization.id,
provider=integration.provider,
id=integration.id,
)
@integration_issue_created.connect(weak=False)
def record_integration_issue_created(integration, organization, user, **kwargs):
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = organization.get_default_owner().id
analytics.record(
"integration.issue.created",
user_id=user_id,
default_user_id=default_user_id,
organization_id=organization.id,
provider=integration.provider,
id=integration.id,
)
@integration_issue_linked.connect(weak=False)
def record_integration_issue_linked(integration, organization, user, **kwargs):
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = organization.get_default_owner().id
analytics.record(
"integration.issue.linked",
user_id=user_id,
default_user_id=default_user_id,
organization_id=organization.id,
provider=integration.provider,
id=integration.id,
)
@issue_deleted.connect(weak=False)
def record_issue_deleted(group, user, delete_type, **kwargs):
if user and user.is_authenticated():
user_id = default_user_id = user.id
else:
user_id = None
default_user_id = group.project.organization.get_default_owner().id
analytics.record(
"issue.deleted",
user_id=user_id,
default_user_id=default_user_id,
organization_id=group.project.organization_id,
group_id=group.id,
delete_type=delete_type,
)
post_save.connect(
deleted_and_discarded_issue,
sender=GroupTombstone,
dispatch_uid="analytics.grouptombstone.created",
weak=False,
)
|
|
import datetime
from typing import Any, List, Mapping
from unittest import mock
import orjson
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import do_change_can_create_users, do_change_user_role
from zerver.lib.exceptions import JsonableError
from zerver.lib.streams import access_stream_for_send_message
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import most_recent_message, queries_captured
from zerver.lib.users import is_administrator_role
from zerver.models import (
UserProfile,
UserStatus,
get_display_recipient,
get_realm,
get_stream,
get_user_by_delivery_email,
)
# Most Zulip tests use ZulipTestCase, which inherits from django.test.TestCase.
# We recommend learning Django basics first, so search the web for "django testing".
# A common first result is https://docs.djangoproject.com/en/3.2/topics/testing/
class TestBasics(ZulipTestCase):
def test_basics(self) -> None:
# Django's tests are based on Python's unittest module, so you
# will see us use things like assertEqual, assertTrue, and assertRaisesRegex
# quite often.
# See https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertEqual
self.assertEqual(7 * 6, 42)
class TestBasicUserStuff(ZulipTestCase):
# Zulip has test fixtures with built-in users. It's good to know
# which users are special. For example, Iago is our built-in
# realm administrator. You can also modify users as needed.
def test_users(self) -> None:
# The example_user() helper returns a UserProfile object.
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.full_name, "King Hamlet")
self.assertEqual(hamlet.role, UserProfile.ROLE_MEMBER)
iago = self.example_user("iago")
self.assertEqual(iago.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
polonius = self.example_user("polonius")
self.assertEqual(polonius.role, UserProfile.ROLE_GUEST)
self.assertEqual(self.example_email("cordelia"), "cordelia@zulip.com")
def test_lib_functions(self) -> None:
# This test is an example of testing a single library function.
# Our tests aren't always at this level of granularity, but it's
# often possible to write concise tests for library functions.
# Get our UserProfile objects first.
iago = self.example_user("iago")
hamlet = self.example_user("hamlet")
# It is a good idea for your tests to clearly demonstrate a
# **change** to a value. So here we want to make sure that
# do_change_user_role will change Hamlet such that
# is_administrator_role becomes True, but we first assert it's
# False.
self.assertFalse(is_administrator_role(hamlet.role))
# Tests should modify properties using the standard library
# functions, like do_change_user_role. Modifying Django
# objects and then using .save() can be buggy, as doing so can
# fail to update caches, RealmAuditLog, or related tables properly.
do_change_user_role(hamlet, UserProfile.ROLE_REALM_OWNER, acting_user=iago)
self.assertTrue(is_administrator_role(hamlet.role))
# After we promote Hamlet, we also demote him. Testing state
# changes like this in a single test can be a good technique,
# although we also don't want tests to be too long.
#
# Important note: You don't need to undo changes done in the
# test at the end. Every test is run inside a database
# transaction, that is reverted after the test completes.
# There are a few exceptions, where tests interact with the
# filesystem (E.g. uploading files), which is generally
# handled by the setUp/tearDown methods for the test class.
do_change_user_role(hamlet, UserProfile.ROLE_MODERATOR, acting_user=iago)
self.assertFalse(is_administrator_role(hamlet.role))
class TestFullStack(ZulipTestCase):
# Zulip's backend tests are largely full-stack integration tests,
# making use of some strategic mocking at times, though we do use
# unit tests for some classes of low-level functions.
#
# See https://zulip.readthedocs.io/en/latest/testing/philosophy.html
# for details on this and other testing design decisions.
def test_client_get(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
# Most full-stack tests require you to log in the user.
# The login_user helper basically wraps Django's client.login().
self.login_user(hamlet)
# Zulip's client_get is a very thin wrapper on Django's client.get.
# We always use the Zulip wrappers for client_get and client_post.
url = f"/json/users/{cordelia.id}"
result = self.client_get(url)
# Almost every meaningful full-stack test for a "happy path" situation
# uses assert_json_success().
self.assert_json_success(result)
# When we unpack the result.content object, we prefer the orjson library.
content = orjson.loads(result.content)
# In this case we will validate the entire payload. It's good to use
# concrete values where possible, but some things, like "cordelia.id",
# are somewhat unpredictable, so we don't hard code values.
#
# Others, like email and full_name here, are fields we haven't
# changed, and thus explicit values would just be hardcoding
# test database defaults in additional places.
self.assertEqual(
content["user"],
dict(
avatar_url=content["user"]["avatar_url"],
avatar_version=1,
date_joined=content["user"]["date_joined"],
email=cordelia.email,
full_name=cordelia.full_name,
is_active=True,
is_admin=False,
is_billing_admin=False,
is_bot=False,
is_guest=False,
is_owner=False,
role=UserProfile.ROLE_MEMBER,
timezone="",
user_id=cordelia.id,
),
)
def test_client_post(self) -> None:
# Here we're gonna test a POST call to /json/users, and it's
# important that we not only check the payload, but we make
# sure that the intended side effects actually happen.
iago = self.example_user("iago")
self.login_user(iago)
realm = get_realm("zulip")
self.assertEqual(realm.id, iago.realm_id)
# Get our failing test first.
self.assertRaises(
UserProfile.DoesNotExist, lambda: get_user_by_delivery_email("romeo@zulip.net", realm)
)
# Before we can successfully post, we need to ensure
# that Iago can create users.
do_change_can_create_users(iago, True)
params = dict(
email="romeo@zulip.net",
password="xxxx",
full_name="Romeo Montague",
)
# Use the Zulip wrapper.
result = self.client_post("/json/users", params)
# Once again we check that the HTTP request was successful.
self.assert_json_success(result)
content = orjson.loads(result.content)
# Finally we test the side effect of the post.
user_id = content["user_id"]
romeo = get_user_by_delivery_email("romeo@zulip.net", realm)
self.assertEqual(romeo.id, user_id)
def test_can_create_users(self) -> None:
# Typically, when testing an API endpoint, we prefer a single
# test covering both the happy path and common error paths.
#
# See https://zulip.readthedocs.io/en/latest/testing/philosophy.html#share-test-setup-code.
iago = self.example_user("iago")
self.login_user(iago)
do_change_can_create_users(iago, False)
valid_params = dict(
email="romeo@zulip.net",
password="xxxx",
full_name="Romeo Montague",
)
# We often use assert_json_error for negative tests.
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "User not authorized for this query", 400)
do_change_can_create_users(iago, True)
incomplete_params = dict(
full_name="Romeo Montague",
)
result = self.client_post("/json/users", incomplete_params)
self.assert_json_error(result, "Missing 'email' argument", 400)
# Verify that the original parameters were valid. Especially
# for errors with generic error messages, this is important to
# confirm that the original request with these parameters
# failed because of incorrect permissions, and not because
# valid_params weren't actually valid.
result = self.client_post("/json/users", valid_params)
self.assert_json_success(result)
# Verify error handling when the user already exists.
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "Email 'romeo@zulip.net' already in use", 400)
def test_tornado_redirects(self) -> None:
# Let's poke a bit at Zulip's event system.
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html
# for context on the system itself and how it should be tested.
#
# Most specific features that might feel tricky to test have
# similarly handy helpers, so find similar tests with `git grep` and read them!
cordelia = self.example_user("cordelia")
self.login_user(cordelia)
params = dict(status_text="on vacation")
events: List[Mapping[str, Any]] = []
# Use the tornado_redirected_to_list context manager to capture
# events.
with self.tornado_redirected_to_list(events, expected_num_events=1):
result = self.api_post(cordelia, "/api/v1/users/me/status", params)
self.assert_json_success(result)
# Check that the POST to Zulip caused the expected events to be sent
# to Tornado.
self.assertEqual(
events[0]["event"],
dict(type="user_status", user_id=cordelia.id, status_text="on vacation"),
)
# Grabbing the last row in the table is OK here, but often it's
# better to look up the object we created via its ID,
# especially if there's risk of similar objects existing
# (E.g. a message sent to that topic earlier in the test).
row = UserStatus.objects.last()
assert row is not None
self.assertEqual(row.user_profile_id, cordelia.id)
self.assertEqual(row.status_text, "on vacation")
class TestStreamHelpers(ZulipTestCase):
# Streams are an important concept in Zulip, and ZulipTestCase
# has helpers such as subscribe, users_subscribed_to_stream,
# and make_stream.
def test_new_streams(self) -> None:
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
realm = cordelia.realm
stream_name = "Some new stream"
self.subscribe(cordelia, stream_name)
self.assertEqual(set(self.users_subscribed_to_stream(stream_name, realm)), {cordelia})
self.subscribe(othello, stream_name)
self.assertEqual(
set(self.users_subscribed_to_stream(stream_name, realm)), {cordelia, othello}
)
def test_private_stream(self) -> None:
# When we test stream permissions, it's very common to use at least
# two users, so that you can see how different users are impacted.
# We commonly use Othello to represent the "other" user from the primary user.
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
realm = cordelia.realm
stream_name = "Some private stream"
# Use the invite_only flag in make_stream to make a stream "private".
stream = self.make_stream(stream_name=stream_name, invite_only=True)
self.subscribe(cordelia, stream_name)
self.assertEqual(set(self.users_subscribed_to_stream(stream_name, realm)), {cordelia})
stream = get_stream(stream_name, realm)
self.assertEqual(stream.name, stream_name)
self.assertTrue(stream.invite_only)
# We will now observe that Cordelia can access the stream...
access_stream_for_send_message(cordelia, stream, forwarder_user_profile=None)
# ...but Othello can't.
with self.assertRaisesRegex(JsonableError, "Not authorized to send to stream"):
access_stream_for_send_message(othello, stream, forwarder_user_profile=None)
class TestMessageHelpers(ZulipTestCase):
# If you are testing behavior related to messages, then it's good
# to know about send_stream_message, send_personal_message, and
# most_recent_message.
def test_stream_message(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
self.subscribe(hamlet, "Denmark")
self.subscribe(iago, "Denmark")
# The functions to send a message return the ID of the created
# message, so you usually don't need to look it up.
sent_message_id = self.send_stream_message(
sender=hamlet,
stream_name="Denmark",
topic_name="lunch",
content="I want pizza!",
)
# But if you want to verify the most recent message received
# by a user, there's a handy function for that.
iago_message = most_recent_message(iago)
# Here we check that the message we sent is the last one that
# Iago received. While we verify several properties of the
# last message, the most important to verify is the unique ID,
# since that protects us from bugs if this test were to be
# extended to send multiple similar messages.
self.assertEqual(iago_message.id, sent_message_id)
self.assertEqual(iago_message.sender_id, hamlet.id)
self.assertEqual(get_display_recipient(iago_message.recipient), "Denmark")
self.assertEqual(iago_message.topic_name(), "lunch")
self.assertEqual(iago_message.content, "I want pizza!")
def test_personal_message(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
sent_message_id = self.send_personal_message(
from_user=hamlet,
to_user=cordelia,
content="hello there!",
)
cordelia_message = most_recent_message(cordelia)
self.assertEqual(cordelia_message.id, sent_message_id)
self.assertEqual(cordelia_message.sender_id, hamlet.id)
self.assertEqual(cordelia_message.content, "hello there!")
class TestQueryCounts(ZulipTestCase):
def test_capturing_queries(self) -> None:
# It's a common pitfall in Django to accidentally perform
# database queries in a loop, due to lazy evaluation of
# foreign keys. We use the queries_captured context manager to
# ensure our query count is predictable.
#
# When a test containing one of these query count assertions
# fails, we'll want to understand the new queries and whether
# they're necessary. You can investigate whether the changes
# are expected/sensible by comparing print(queries) between
# your branch and main.
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
with queries_captured() as queries:
self.send_personal_message(
from_user=hamlet,
to_user=cordelia,
content="hello there!",
)
# The assert_length helper is another useful extra from ZulipTestCase.
self.assert_length(queries, 15)
class TestDevelopmentEmailsLog(ZulipTestCase):
# We have development-specific utilities that automate common tasks
# to improve developer productivity.
#
# One such utility is the /emails/generate/ endpoint, which can be used to
# generate all sorts of emails Zulip sends. Those can be accessed at /emails/
# in the development server. Let's test that here.
def test_generate_emails(self) -> None:
# It is common for some functions that we test to rely on a certain
# setting's value. You can test those under the context of a desired
# setting value, as done below.
# The endpoint we're testing here relies on these settings:
# * EMAIL_BACKEND: The backend class used to send emails.
# * DEVELOPMENT_LOG_EMAILS: Whether to log emails sent.
# So we set both to the required values.
#
# If the code you're testing creates logs, it is best to capture them
# and verify the log messages. That can be achieved with assertLogs()
# as you'll see below. Read more about assertLogs() at:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertLogs
with self.settings(EMAIL_BACKEND="zproject.email_backends.EmailLogBackEnd"), self.settings(
DEVELOPMENT_LOG_EMAILS=True
), self.assertLogs(level="INFO") as logger:
result = self.client_get(
"/emails/generate/"
) # Generates emails and redirects to /emails/
self.assertEqual("/emails/", result["Location"]) # Make sure redirect URL is correct.
# The above call to /emails/generate/ creates 15 emails and
# logs the below line for every email.
output_log = (
"INFO:root:Emails sent in development are available at http://testserver/emails"
)
# logger.output is a list of all the log messages captured. Verify it is as expected.
self.assertEqual(logger.output, [output_log] * 15)
# Now, let's actually go to the URL the above call redirects to, i.e., /emails/
result = self.client_get(result["Location"])
# assert_in_success_response() is another helper that is commonly used to ensure
# we are on the right page by verifying a string exists in the page's content.
self.assert_in_success_response(["All the emails sent in the Zulip"], result)
class TestMocking(ZulipTestCase):
# Mocking, primarily used in testing, is a technique that allows you to
# replace methods or objects with fake entities.
#
# Mocking is generally used in situations where
# we want to avoid running original code for reasons
# like skipping HTTP requests, saving execution time etc.
#
# Learn more about mocking in-depth at:
# https://zulip.readthedocs.io/en/latest/testing/testing-with-django.html#testing-with-mocks
#
# The following test demonstrates a simple use case
# where mocking is helpful in saving test-run time.
def test_edit_message(self) -> None:
"""
Verify if the time limit imposed on message editing is working correctly.
"""
iago = self.example_user("iago")
self.login("iago")
# Set limit to edit message content.
MESSAGE_CONTENT_EDIT_LIMIT = 5 * 60 # 5 minutes
result = self.client_patch(
"/json/realm",
{
"allow_message_editing": "true",
"message_content_edit_limit_seconds": MESSAGE_CONTENT_EDIT_LIMIT,
},
)
self.assert_json_success(result)
sent_message_id = self.send_stream_message(
iago,
"Scotland",
topic_name="lunch",
content="I want pizza!",
)
message_sent_time = timezone_now()
# Verify message sent.
message = most_recent_message(iago)
self.assertEqual(message.id, sent_message_id)
self.assertEqual(message.content, "I want pizza!")
# Edit message content now. This should work as we're editing
# it immediately after sending i.e., before the limit exceeds.
result = self.client_patch(
f"/json/messages/{sent_message_id}", {"content": "I want burger!"}
)
self.assert_json_success(result)
message = most_recent_message(iago)
self.assertEqual(message.id, sent_message_id)
self.assertEqual(message.content, "I want burger!")
# Now that we tested message editing works within the limit,
# we want to verify it doesn't work beyond the limit.
#
# To do that we'll have to wait for the time limit to pass which is
# 5 minutes here. Easy, use time.sleep() but mind that it slows down the
# test to a great extent, which isn't good. This is when mocking comes to the rescue.
# We can check what the original code does to determine whether the time limit
# exceeded and mock that here such that the code runs as if the time limit
# exceeded without actually waiting for that long!
#
# In this case, it is timezone_now, an alias of django.utils.timezone.now,
# whose difference from the message-sent time is checked. So, we want
# that timezone_now() call to return a `datetime` object representing a time
# that is beyond the limit.
#
# Notice how mock.patch() is used here to do exactly that.
# mock.patch() here makes any call to `timezone_now` in `zerver.lib.actions`
# return the value passed as `return_value` within its context.
# You can also use mock.patch() as a decorator depending on the
# requirements. Read more at the documentation link provided above.
time_beyond_edit_limit = message_sent_time + datetime.timedelta(
seconds=MESSAGE_CONTENT_EDIT_LIMIT + 100
) # There's a buffer time applied to the limit, hence the extra 100s.
with mock.patch(
"zerver.lib.actions.timezone_now",
return_value=time_beyond_edit_limit,
):
result = self.client_patch(
f"/json/messages/{sent_message_id}", {"content": "I actually want pizza."}
)
self.assert_json_error(result, msg="The time limit for editing this message has passed")
message = most_recent_message(iago)
self.assertEqual(message.id, sent_message_id)
self.assertEqual(message.content, "I want burger!")
|
|
# Copyright (c) 2014 Dave McCoy (dave.mccoy@cospandesign.com)
# This file is part of Nysa (wiki.cospandesign.com/index.php?title=Nysa).
#
# Nysa is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Nysa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nysa; If not, see <http://www.gnu.org/licenses/>.
""" nysa platform scanner
"""
__author__ = 'dave.mccoy@cospandesign.com (Dave McCoy)'
import sys
import os
from inspect import isclass
from inspect import ismodule
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, "common")))
from site_manager import SiteManager
from nysa import NysaCommError
from nysa_platform import Platform
from status import Status
class PlatformScannerException(Exception):
pass
class PlatformScanner(object):
def __init__(self, status = None):
super(PlatformScanner, self).__init__()
self.s = status
self.n = None
self.uid = None
self.dev_type = None
def get_board_path_dict(self):
sm = SiteManager()
board_names = sm.get_local_board_names()
board_path_dict = {}
for bname in board_names:
board_path_dict[bname] = sm.get_board_directory(bname)
return board_path_dict
def get_platforms(self):
"""
Return a dictionary of boards with the unique name as the KEY
Args:
Nothing
Returns:
(Dictionary of Platforms)
Raises:
Nothing
"""
board_path_dict = self.get_board_path_dict()
#platform_paths_list = []
module_dict = {}
plat_class_dict = {}
for board in board_path_dict:
platform_paths_list = os.path.join(board_path_dict[board], board)
sys.path.append(os.path.join(board_path_dict[board]))
m = __import__("%s.nysa_platform" % board)
board_platform = m.nysa_platform
for name in dir(board_platform):
item = getattr(board_platform, name)
if not isclass(item):
continue
#XXX: Kinda Jenkie
if "IS_PLATFORM" in dir(item) and item.__name__ is not "Platform":
if self.s: self.s.Debug("Found: %s" % name)
unique = True
for plat_class in plat_class_dict:
if str(plat_class) == str(item):
unique = False
if unique:
#print "Adding Class: %s" % str(item)
plat_class_dict[board] = item
if self.s: self.s.Debug("Platform Classes: %s" % str(plat_class_dict))
return plat_class_dict
def get_platforms(status = None):
"""
Return all platforms in the system
Args:
status (Status): a debug status object
Return:
(list of platforms)
Raises:
Nothing
"""
if status is None:
status = Status()
platforms = []
pscanner = PlatformScanner()
platform_dict = pscanner.get_platforms()
platform_names = platform_dict.keys()
if "sim" in platform_names:
#If sim is in the platform names move it to the back
platform_names.remove("sim")
platform_names.append("sim")
for platform_name in platform_names:
if status: status.Debug("Platform: %s" % str(platform_name))
#Get a reference to a platform object (like sim, or dionysus)
platform_instance = platform_dict[platform_name](status)
#Scan for instances of that particular platform
instances_dict = platform_instance.scan()
for name in instances_dict:
n = instances_dict[name]
if n is not None:
if status: status.Debug("Found a Nysa Instance: %s" % name)
platforms.append(n)
return platforms
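#Illustrative usage sketch (not part of the original module): enumerate every
#attached board found by the scanner and print its name; assumes at least one
#platform package (e.g. "sim") is installed.
def _example_list_boards():
    status = Status()
    for board in get_platforms(status):
        print "Attached board: %s" % board.get_board_name()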
def get_platforms_with_device(driver, status = None):
"""
From a driver return a list of platforms that have a reference to a
device that can be controlled through this driver
Args:
driver (Driver Object): a driver to find a reference to
Return:
(List of platforms that support the driver)
Raises:
Nothing
"""
if status: status.Debug("Type of driver: %s" % str(driver))
platforms = []
pscanner = PlatformScanner()
platform_dict = pscanner.get_platforms()
platform_names = platform_dict.keys()
if "sim" in platform_names:
#If sim is in the platform names move it to the back
platform_names.remove("sim")
platform_names.append("sim")
for platform_name in platform_names:
if status: status.Debug("Platform: %s" % str(platform_name))
#Get a reference to a platform object (like sim, or dionysus)
platform_instance = platform_dict[platform_name](status)
#Scan for instances of that particular platform
instances_dict = platform_instance.scan()
for name in instances_dict:
try:
n = instances_dict[name]
if n is not None:
if status: status.Debug("Found a Nysa Instance: %s" % name)
n.read_sdb()
if n.is_device_in_platform(driver):
platforms.append(n)
except NysaCommError:
continue
return platforms
def find_board(name, serial = None, status = None):
s = status
pc = PlatformScanner(s)
pc.get_board_path_dict()
platform_class_dict = pc.get_platforms()
board = None
if name is None:
pc = PlatformScanner(s)
platforms = pc.get_platforms()
names = []
for platform_name in platforms:
platform = platforms[platform_name](s)
boards = platform.scan()
#print "boards for %s: % s" % (platform_name, str(boards))
if len(boards) > 0:
#print "Found %d board for %s" % (len(boards), platform_name)
names.append(platforms[platform_name]().get_type())
if len(names) == 1:
#print "Found: %s " % str(names)
if s: s.Debug("Found: %s" % names[0])
name = names[0]
else:
if "sim" in names:
names.remove("sim")
if len(names) == 1:
if s: s.Debug("Found: %s" % names[0])
name = names[0]
else:
raise PlatformScannerException("more than one option for attached board: %s" % str(names))
sys.exit(1)
name = name.lower()
#print "platforms: %s" % str(platform_class_dict.keys())
#print "name: %s" % str(name)
if name not in platform_class_dict:
raise PlatformScannerException("%s is not currently installed, please install more platforms" % name)
p = platform_class_dict[name](s)
dev_dict = p.scan()
if len(dev_dict) == 0:
raise PlatformScannerException("No boards found for %s" % name)
if len(dev_dict) == 1:
name = dev_dict.keys()[0]
board = dev_dict[name]
else:
if serial is None:
exception = ""
exception = "Serial number (ID) required because there are multiple platforms availble\n"
exception += "Available IDs:\n"
for dev in dev_dict:
exception += "\t%s\n" % dev
raise PlatformScannerException(exception)
#Serial Number Specified
if s: s.Debug("Found board: %s, searching for serial: %s" % (name, serial))
for dev in dev_dict:
if dev == serial:
name = dev
board = dev_dict[name]
break
return board
def sdb_to_config(n):
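    """
    Build a configuration dictionary (board name, bus type, slave devices and
    memory devices) from the SDB of a Nysa instance.
    Args:
        n (Nysa Object): instance to read the SDB description from
    Return:
        (Dictionary): configuration describing the board
    Raises:
        Nothing
    """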
config_dict = {}
print "Pretty print:"
n.pretty_print_sdb()
#Read the board id and find out what type of board this is
config_dict["board"] = n.get_board_name()
#print "Name: %s" % config_dict["board"]
#Read the bus flag (Wishbone or Axie)
if n.is_wishbone_bus():
config_dict["bus_type"] = "wishbone"
config_dict["TEMPLATE"] = "wishbone_template.json"
if n.is_axie_bus():
config_dict["bus_type"] = "axie"
config_dict["TEMPLATE"] = "axie_template.json"
config_dict["SLAVES"] = {}
config_dict["MEMORY"] = {}
#Read the number of slaves
    #Go through each of the slave devices and find out what type it is
for i in range (n.get_number_of_devices()):
if n.is_memory_device(i):
name = "Memory %d" % i
config_dict["MEMORY"][name] = {}
config_dict["MEMORY"][name]["sub_id"] = n.get_device_sub_id(i)
config_dict["MEMORY"][name]["unique_id"] = n.get_device_unique_id(i)
config_dict["MEMORY"][name]["address"] = n.get_device_address(i)
config_dict["MEMORY"][name]["size"] = n.get_device_size(i)
continue
name = n.get_device_name_from_id(n.get_device_id(i))
config_dict["SLAVES"][name] = {}
#print "Name: %s" % n.get_device_name_from_id(n.get_device_id(i))
config_dict["SLAVES"][name]["id"] = n.get_device_id(i)
config_dict["SLAVES"][name]["sub_id"] = n.get_device_sub_id(i)
config_dict["SLAVES"][name]["unique_id"] = n.get_device_unique_id(i)
config_dict["SLAVES"][name]["address"] = n.get_device_address(i)
config_dict["SLAVES"][name]["size"] = n.get_device_size(i)
config_dict["INTERFACE"] = {}
return config_dict
|
|
#!/usr/bin/env python3
import sys
import os
import mmap
import cairocffi as cairo
import wayland.protocol
from wayland.client import MakeDisplay
from wayland.utils import AnonymousFile
import math
import select
import time
import logging
# See https://github.com/sde1000/python-xkbcommon for the following:
from xkbcommon import xkb
log = logging.getLogger(__name__)
shutdowncode = None
# List of future events; objects must support the nexttime attribute
# and alarm() method. nexttime should be the time at which the object
# next wants to be called, or None if the object temporarily does not
# need to be scheduled.
eventlist = []
# List of file descriptors to watch with handlers. Expected to be objects
# with a fileno() method that returns the appropriate fd number, and methods
# called doread(), dowrite(), etc.
rdlist = []
# List of functions to invoke each time around the event loop. These
# functions may do anything, including changing timeouts and drawing
# on the display.
ticklist = []
# List of functions to invoke before calling select. These functions
# may not change timeouts or draw on the display. They will typically
# flush queued output.
preselectlist = []
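# A minimal sketch of an object that follows the eventlist protocol described
# above: it exposes a nexttime attribute and an alarm() method. Nothing
# registers it automatically; a caller would append an instance to eventlist
# itself. The name ExampleTimer is illustrative only.
class ExampleTimer:
    """Call `callback` roughly every `interval` seconds via the event loop."""
    def __init__(self, interval, callback):
        self.interval = interval
        self.callback = callback
        self.nexttime = time.time() + interval
    def alarm(self):
        # Invoked by the event loop once nexttime has passed; reschedule.
        self.callback()
        self.nexttime = time.time() + self.interval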
class time_guard(object):
def __init__(self, name, max_time):
self._name = name
self._max_time = max_time
def __enter__(self):
self._start_time = time.time()
def __exit__(self, type, value, traceback):
t = time.time()
time_taken = t - self._start_time
if time_taken > self._max_time:
log.info("time_guard: %s took %f seconds",self._name,time_taken)
tick_time_guard = time_guard("tick",0.5)
preselect_time_guard = time_guard("preselect",0.1)
doread_time_guard = time_guard("doread",0.5)
dowrite_time_guard = time_guard("dowrite",0.5)
doexcept_time_guard = time_guard("doexcept",0.5)
alarm_time_guard = time_guard("alarm",0.5)
def eventloop():
global shutdowncode
while shutdowncode is None:
for i in ticklist:
with tick_time_guard:
i()
# Work out what the earliest timeout is
timeout = None
t = time.time()
for i in eventlist:
nt = i.nexttime
i.mainloopnexttime = nt
if nt is None:
continue
if timeout is None or (nt - t) < timeout:
timeout = nt - t
for i in preselectlist:
with preselect_time_guard:
i()
try:
(rd, wr, ex) = select.select(rdlist, [], [], timeout)
except KeyboardInterrupt:
(rd, wr, ex) = [], [], []
shutdowncode = 1
for i in rd:
with doread_time_guard:
i.doread()
for i in wr:
with dowrite_time_guard:
i.dowrite()
for i in ex:
with doexcept_time_guard:
i.doexcept()
# Process any events whose time has come
t = time.time()
for i in eventlist:
if not hasattr(i, 'mainloopnexttime'):
continue
if i.mainloopnexttime and t >= i.mainloopnexttime:
with alarm_time_guard:
i.alarm()
def ping_handler(thing, serial):
"""
Respond to a 'ping' with a 'pong'.
"""
thing.pong(serial)
class Window:
def __init__(self, connection, width, height, title="Window",
class_="python-wayland-test", redraw=None, fullscreen=False):
self.title = title
self.orig_width = width
self.orig_height = height
self._w = connection
if not self._w.shm_formats:
raise RuntimeError("No suitable Shm formats available")
self.is_fullscreen = fullscreen
self.redraw_func = redraw
self.surface = self._w.compositor.create_surface()
self._w.surfaces[self.surface] = self
self.xdg_surface = self._w.xdg_wm_base.get_xdg_surface(self.surface)
self.xdg_toplevel = self.xdg_surface.get_toplevel()
self.xdg_toplevel.set_title(title)
self.xdg_toplevel.set_parent(None)
self.xdg_toplevel.set_app_id(class_)
self.xdg_toplevel.set_min_size(width, height)
self.xdg_toplevel.set_max_size(width, height)
if fullscreen:
self.xdg_toplevel.set_fullscreen(None)
self.wait_for_configure = True
self.xdg_surface.dispatcher['ping'] = ping_handler
self.xdg_surface.dispatcher['configure'] = \
self._xdg_surface_configure_handler
#self.xdg_toplevel.dispatcher['configure'] = lambda *x: None
#self.xdg_toplevel.dispatcher['close'] = lambda *x: None
self.buffer = None
self.shm_data = None
self.surface.commit()
def close(self):
if not self.surface.destroyed:
self.surface.destroy()
if self.buffer is not None:
self.buffer.destroy()
self.buffer = None
self.shm_data.close()
del self.s, self.shm_data
def resize(self, width, height):
# Drop previous buffer and shm data if necessary
if self.buffer:
self.buffer.destroy()
self.shm_data.close()
# Do not complete a resize until configure has been acknowledged
if self.wait_for_configure:
return
wl_shm_format, cairo_shm_format = self._w.shm_formats[0]
stride = cairo.ImageSurface.format_stride_for_width(
cairo_shm_format, width)
size = stride * height
with AnonymousFile(size) as fd:
self.shm_data = mmap.mmap(
fd, size, prot=mmap.PROT_READ | mmap.PROT_WRITE,
flags=mmap.MAP_SHARED)
pool = self._w.shm.create_pool(fd, size)
self.buffer = pool.create_buffer(
0, width, height, stride, wl_shm_format)
pool.destroy()
self.s = cairo.ImageSurface(cairo_shm_format, width, height,
data=self.shm_data, stride=stride)
self.surface.attach(self.buffer, 0, 0)
self.width = width
self.height = height
if self.redraw_func:
# This should invoke `redraw` which then invokes `surface.commit`
self.redraw_func(self)
else:
self.surface.commit()
def redraw(self):
"""Copy the whole window surface to the display"""
self.add_damage()
self.surface.commit()
def add_damage(self, x=0, y=0, width=None, height=None):
if width is None:
width = self.width
if height is None:
height = self.height
self.surface.damage(x, y, width, height)
def pointer_motion(self, seat, time, x, y):
pass
def _xdg_surface_configure_handler(
self, the_xdg_surface, serial):
the_xdg_surface.ack_configure(serial)
self.wait_for_configure = False
if not self.surface.destroyed:
self.resize(self.orig_width, self.orig_height)
class Seat:
def __init__(self, obj, connection, global_name):
self.c_enum = connection.interfaces['wl_seat'].enums['capability']
self.s = obj
self._c = connection
self.global_name = global_name
self.name = None
self.capabilities = 0
self.pointer = None
self.keyboard = None
self.s.dispatcher['capabilities'] = self._capabilities
self.s.dispatcher['name'] = self._name
self.tabsym = xkb.keysym_from_name("Tab")
def removed(self):
if self.pointer:
self.pointer.release()
self.pointer = None
if self.keyboard:
self.keyboard.release()
del self.keyboard_state
self.keyboard = None
# ...that's odd, there's no request in the protocol to destroy
# the seat proxy! I suppose we just have to leave it lying
# around.
def _name(self, seat, name):
print("Seat got name: {}".format(name))
self.name = name
def _capabilities(self, seat, c):
print("Seat {} got capabilities: {}".format(self.name, c))
self.capabilities = c
pointer_available = c & self.c_enum['pointer']
if pointer_available and not self.pointer:
self.pointer = self.s.get_pointer()
self.pointer.dispatcher['enter'] = self.pointer_enter
self.pointer.dispatcher['leave'] = self.pointer_leave
self.pointer.dispatcher['motion'] = self.pointer_motion
self.pointer.silence['motion'] = True
self.pointer.dispatcher['button'] = self.pointer_button
self.pointer.dispatcher['axis'] = self.pointer_axis
self.current_pointer_window = None
if self.pointer and not pointer_available:
self.pointer.release()
self.current_pointer_window = None
self.pointer = None
keyboard_available = c & self.c_enum['keyboard']
if keyboard_available and not self.keyboard:
self.keyboard = self.s.get_keyboard()
self.keyboard.dispatcher['keymap'] = self.keyboard_keymap
self.keyboard.dispatcher['enter'] = self.keyboard_enter
self.keyboard.dispatcher['leave'] = self.keyboard_leave
self.keyboard.dispatcher['key'] = self.keyboard_key
self.keyboard.dispatcher['modifiers'] = self.keyboard_modifiers
self.current_keyboard_window = None
if self.keyboard and not keyboard_available:
self.keyboard.release()
self.current_keyboard_window = None
self.keyboard_state = None
self.keyboard = None
def pointer_enter(self, pointer, serial, surface, surface_x, surface_y):
print("pointer_enter {} {} {} {}".format(
serial, surface, surface_x, surface_y))
self.current_pointer_window = self._c.surfaces.get(surface, None)
pointer.set_cursor(serial, None, 0, 0)
def pointer_leave(self, pointer, serial, surface):
print("pointer_leave {} {}".format(serial, surface))
self.current_pointer_window = None
def pointer_motion(self, pointer, time, surface_x, surface_y):
if not self.current_pointer_window:
raise Exception("Pointer motion encountered even though there is not a matching window")
self.current_pointer_window.pointer_motion(
self, time, surface_x, surface_y)
def pointer_button(self, pointer, serial, time, button, state):
print("pointer_button {} {} {} {}".format(serial, time, button, state))
if state == 1 and self.current_pointer_window:
print("Seat {} starting shell surface move".format(self.name))
self.current_pointer_window.xdg_toplevel.move(self.s, serial)
def pointer_axis(self, pointer, time, axis, value):
print("pointer_axis {} {} {}".format(time, axis, value))
def keyboard_keymap(self, keyboard, format_, fd, size):
print("keyboard_keymap {} {} {}".format(format_, fd, size))
keymap_data = mmap.mmap(
fd, size, prot=mmap.PROT_READ, flags=mmap.MAP_PRIVATE)
os.close(fd)
# The provided keymap appears to have a terminating NULL which
# xkbcommon chokes on. Specify length=size-1 to remove it.
keymap = self._c.xkb_context.keymap_new_from_buffer(
keymap_data, length=size - 1)
keymap_data.close()
self.keyboard_state = keymap.state_new()
def keyboard_enter(self, keyboard, serial, surface, keys):
print("keyboard_enter {} {} {}".format(serial, surface, keys))
self.current_keyboard_window = self._c.surfaces.get(surface, None)
def keyboard_leave(self, keyboard, serial, surface):
print("keyboard_leave {} {}".format(serial, surface))
self.current_keyboard_window = None
def keyboard_key(self, keyboard, serial, time, key, state):
print("keyboard_key {} {} {} {}".format(serial, time, key, state))
sym = self.keyboard_state.key_get_one_sym(key + 8)
if state == 1 and sym == self.tabsym:
# Why did I put this in?!
print("Saw a tab!")
if state == 1:
s = self.keyboard_state.key_get_string(key + 8)
print("s={}".format(repr(s)))
if s == "q":
global shutdowncode
shutdowncode = 0
elif s == "c":
# Close the window
self.current_keyboard_window.close()
elif s == "f":
# Fullscreen toggle
if self.current_keyboard_window.is_fullscreen:
self.current_keyboard_window.xdg_toplevel.unset_fullscreen()
self.current_keyboard_window.is_fullscreen = False
self.current_keyboard_window.resize(
self.current_keyboard_window.orig_width,
self.current_keyboard_window.orig_height)
else:
self.current_keyboard_window.xdg_toplevel.set_fullscreen(None)
self.current_keyboard_window.is_fullscreen = True
def keyboard_modifiers(self, keyboard, serial, mods_depressed,
mods_latched, mods_locked, group):
print("keyboard_modifiers {} {} {} {} {}".format(
serial, mods_depressed, mods_latched, mods_locked, group))
self.keyboard_state.update_mask(mods_depressed, mods_latched,
mods_locked, group, 0, 0)
class Output:
def __init__(self, obj, connection, global_name):
self.o = obj
self._c = connection
self.global_name = global_name
self.o.dispatcher['geometry'] = self._geometry
self.o.dispatcher['mode'] = self._mode
self.o.dispatcher['done'] = self._done
def _geometry(self, output, x, y, phy_width, phy_height, subpixel,
make, model, transform):
print("Ouput: got geometry: x={}, y={}, phy_width={}, phy_height={},"
"make={}, model={}".format(x, y, phy_width, phy_height,
make, model))
def _mode(self, output, flags, width, height, refresh):
print("Output: got mode: flags={}, width={}, height={}, refresh={}" \
.format(flags, width, height, refresh))
def _done(self, output):
print("Output: done for now")
class WaylandConnection:
def __init__(self, wp_base, *other_wps):
self.wps = (wp_base,) + other_wps
self.interfaces = {}
for wp in self.wps:
for k,v in wp.interfaces.items():
self.interfaces[k] = v
# Create the Display proxy class from the protocol
Display = MakeDisplay(wp_base)
self.display = Display()
self.registry = self.display.get_registry()
self.registry.dispatcher['global'] = self.registry_global_handler
self.registry.dispatcher['global_remove'] = \
self.registry_global_remove_handler
self.xkb_context = xkb.Context()
# Dictionary mapping surface proxies to Window objects
self.surfaces = {}
self.compositor = None
self.xdg_wm_base = None
self.shm = None
self.shm_formats = []
self.seats = []
self.outputs = []
# Bind to the globals that we're interested in. NB we won't
# pick up things like shm_formats at this point; after we bind
# to wl_shm we need another roundtrip before we can be sure to
# have received them.
self.display.roundtrip()
if not self.compositor:
raise RuntimeError("Compositor not found")
if not self.xdg_wm_base:
raise RuntimeError("xdg_wm_base not found")
if not self.shm:
raise RuntimeError("Shm not found")
# Pick up shm formats
self.display.roundtrip()
rdlist.append(self)
preselectlist.append(self._preselect)
def fileno(self):
return self.display.get_fd()
def disconnect(self):
self.display.disconnect()
def doread(self):
self.display.recv()
self.display.dispatch_pending()
def _preselect(self):
self.display.flush()
def registry_global_handler(self, registry, name, interface, version):
print("registry_global_handler: {} is {} v{}".format(
name, interface, version))
if interface == "wl_compositor":
# We know up to and require version 3
self.compositor = registry.bind(
name, self.interfaces['wl_compositor'], 3)
elif interface == "xdg_wm_base":
# We know up to and require version 1
self.xdg_wm_base = registry.bind(
name, self.interfaces['xdg_wm_base'], 1)
elif interface == "wl_shm":
# We know up to and require version 1
self.shm = registry.bind(
name, self.interfaces['wl_shm'], 1)
self.shm.dispatcher['format'] = self.shm_format_handler
elif interface == "wl_seat":
# We know up to and require version 4
self.seats.append(Seat(registry.bind(
name, self.interfaces['wl_seat'], 4), self, name))
elif interface == "wl_output":
# We know up to and require version 2
self.outputs.append(Output(registry.bind(
name, self.interfaces['wl_output'], 2), self, name))
def registry_global_remove_handler(self, registry, name):
# Haven't been able to get weston to send this event!
print("registry_global_remove_handler: {} gone".format(name))
for s in self.seats:
if s.global_name == name:
print("...it was a seat! Releasing seat resources.")
s.removed()
def shm_format_handler(self, shm, format_):
f = shm.interface.enums['format']
if format_ == f.entries['argb8888'].value:
self.shm_formats.append((format_, cairo.FORMAT_ARGB32))
elif format_ == f.entries['xrgb8888'].value:
self.shm_formats.append((format_, cairo.FORMAT_RGB24))
elif format_ == f.entries['rgb565'].value:
self.shm_formats.append((format_, cairo.FORMAT_RGB16_565))
def draw_in_window(w):
ctx = cairo.Context(w.s)
ctx.set_source_rgba(0,0,0,0)
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.paint()
ctx.set_operator(cairo.OPERATOR_OVER)
ctx.scale(w.width, w.height)
pat = cairo.LinearGradient(0.0, 0.0, 0.0, 1.0)
pat.add_color_stop_rgba(1, 0.7, 0, 0, 0.5)
pat.add_color_stop_rgba(0, 0.9, 0.7, 0.2, 1)
ctx.rectangle(0, 0, 1, 1)
ctx.set_source(pat)
ctx.fill()
del pat
ctx.translate(0.1, 0.1)
ctx.move_to(0, 0)
ctx.arc(0.2, 0.1, 0.1, -math.pi/2, 0)
ctx.line_to(0.5, 0.1)
ctx.curve_to(0.5, 0.2, 0.5, 0.4, 0.2, 0.8)
ctx.close_path()
ctx.set_source_rgb(0.3, 0.2, 0.5)
ctx.set_line_width(0.02)
ctx.stroke()
ctx.select_font_face("monospace")
ctx.set_font_size(0.05)
ctx.set_source_rgb(1.0, 1.0, 1.0)
ctx.move_to(0.2, 0.2)
ctx.show_text("{} {} x {}".format(w.title, w.width, w.height))
del ctx
w.s.flush()
w.redraw()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# Load the main Wayland protocol.
wp_base = wayland.protocol.Protocol("/usr/share/wayland/wayland.xml")
wp_xdg_shell = wayland.protocol.Protocol("/usr/share/wayland-protocols/stable/xdg-shell/xdg-shell.xml")
try:
conn = WaylandConnection(wp_base, wp_xdg_shell)
except FileNotFoundError as e:
if e.errno == 2:
print("Unable to connect to the compositor - "
"is one running?")
sys.exit(1)
raise
w1 = Window(conn, 640, 480, title="Window 1", redraw=draw_in_window)
w2 = Window(conn, 320, 240, title="Window 2", redraw=draw_in_window)
w3 = Window(conn, 160, 120, title="Window 3", redraw=draw_in_window)
eventloop()
w1.close()
w2.close()
w3.close()
conn.display.roundtrip()
conn.disconnect()
print("About to exit with code {}".format(shutdowncode))
logging.shutdown()
sys.exit(shutdowncode)
|
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
X, y = make_classification(
n_samples=200, n_features=6, random_state=42
)
return X, y
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
with pytest.raises(ValueError):
cal_clf.fit(X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf,
prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf,
1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss((y_test + 1) % 2,
prob_pos_cal_clf_relabeled))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
# Check only "isotonic" and "sigmoid" are accepted as methods
X, y = data
clf = LinearSVC()
clf_invalid_method = CalibratedClassifierCV(
clf, method="foo", ensemble=ensemble
)
with pytest.raises(ValueError):
clf_invalid_method.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
    # `base_estimator` should provide either decision_function or
    # predict_proba (most regressors, for instance, do not and should fail)
X, y = data
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble)
with pytest.raises(RuntimeError):
clf_base_regressor.fit(X, y)
def test_calibration_default_estimator(data):
# Check base_estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].base_estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(
base_estimator, method=method, ensemble=ensemble
)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
base_estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
centers=10, cluster_std=15.0)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = \
multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
n_classes=n_classes)
calibrated_brier = multiclass_brier(y_test, probas,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs,
n_classes=n_classes)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_zero_probability():
    # Test an edge case where _CalibratedClassifier avoids numerical errors
    # in the multiclass normalization step: if all calibrators output zero
    # at once for a given sample, it falls back to uniform probabilities.
class ZeroCalibrator():
# This function is called from _CalibratedClassifier.predict_proba.
def predict(self, X):
return np.zeros(X.shape[0])
X, y = make_blobs(n_samples=50, n_features=10, random_state=7,
centers=10, cluster_std=15.0)
clf = DummyClassifier().fit(X, y)
calibrator = ZeroCalibrator()
cal_clf = _CalibratedClassifier(
base_estimator=clf, calibrators=[calibrator], classes=clf.classes_)
probas = cal_clf.predict_proba(X)
# Check that all probabilities are uniformly 1. / clf.n_classes_
assert_allclose(probas, 1. / clf.n_classes_)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
# Test that `ensemble=False` is the same as using predictions from
# `cross_val_predict` to train calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(
clf, X, y, cv=3, method='decision_function'
)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
with pytest.raises(ValueError):
_SigmoidCalibration().fit(np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
with pytest.raises(ValueError):
calibration_curve([1.1], [-0.1], normalize=False)
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy='quantile')
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
with pytest.raises(ValueError):
calibration_curve(y_true2, y_pred2, strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', SimpleImputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(
clf, cv=2, method='isotonic', ensemble=ensemble
)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
    # Test that calibration works when the train set in a train-test split
    # does not contain all classes.
    # Since this test uses LOO, at each iteration the train set will be
    # missing one class label.
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1:] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def dict_data():
dict_data = [
{'state': 'NY', 'age': 'adult'},
{'state': 'TX', 'age': 'adult'},
{'state': 'VT', 'age': 'child'},
]
text_labels = [1, 0, 1]
return dict_data, text_labels
@pytest.fixture
def dict_data_pipeline(dict_data):
X, y = dict_data
pipeline_prefit = Pipeline([
('vectorizer', DictVectorizer()),
('clf', RandomForestClassifier())
])
return pipeline_prefit.fit(X, y)
def test_calibration_dict_pipeline(dict_data, dict_data_pipeline):
"""Test that calibration works in prefit pipeline with transformer
`X` is not array-like, sparse matrix or dataframe at the start.
See https://github.com/scikit-learn/scikit-learn/issues/8710
Also test it can predict without running into validation errors.
See https://github.com/scikit-learn/scikit-learn/issues/19637
"""
X, y = dict_data
clf = dict_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
# Neither the pipeline nor the calibration meta-estimator
# expose the n_features_in_ check on this kind of data.
assert not hasattr(clf, 'n_features_in_')
assert not hasattr(calib_clf, 'n_features_in_')
# Ensure that no error is thrown with predict and predict_proba
calib_clf.predict(X)
calib_clf.predict_proba(X)
@pytest.mark.parametrize('clf, cv', [
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
# Check that `n_features_in_` and `classes_` attributes created properly
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
if cv == 'prefit':
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == 'prefit':
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
def test_calibration_inconsistent_prefit_n_features_in():
# Check that `n_features_in_` from prefit base estimator
# is consistent with training set
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
clf = LinearSVC(C=1).fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
msg = "X has 3 features, but LinearSVC is expecting 5 features as input."
with pytest.raises(ValueError, match=msg):
calib_clf.fit(X[:, :3], y)
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
# Check that we raise the proper deprecation warning if accessing
# `calibrators_` from the `_CalibratedClassifier`.
X, y = data
calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
with pytest.warns(FutureWarning):
calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
for clf1, clf2 in zip(
calibrators, calib_clf.calibrated_classifiers_[0].calibrators
):
assert clf1 is clf2
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.parameter`."""
##############################################################################
# IMPORTS
# STDLIB
import ast
import inspect
import sys
# THIRD PARTY
import pytest
import numpy as np
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter, _validate_to_float, _validate_with_unit
##############################################################################
# TESTS
##############################################################################
class ParameterTestMixin:
"""Tests for a :class:`astropy.cosmology.Parameter` on a Cosmology.
:class:`astropy.cosmology.Parameter` is a descriptor and this test suite
tests descriptors by class inheritance, so ``ParameterTestMixin`` is mixed
into ``TestCosmology`` (tests :class:`astropy.cosmology.Cosmology`).
"""
@pytest.fixture
def parameter(self, cosmo_cls):
"""Cosmological Parameters"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__parameters__).pop())
@pytest.fixture
def all_parameter(self, cosmo_cls):
"""Cosmological All Parameter instances"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__all_parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__all_parameters__).pop())
# ===============================================================
# Method Tests
def test_Parameter_class_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes on class."""
# _registry_validators
assert hasattr(all_parameter, "_registry_validators")
assert isinstance(all_parameter._registry_validators, dict)
assert all(isinstance(k, str) for k in all_parameter._registry_validators.keys())
assert all(callable(v) for v in all_parameter._registry_validators.values())
def test_Parameter_init(self):
"""Test :class:`astropy.cosmology.Parameter` instantiation."""
# defaults
parameter = Parameter()
assert parameter.fvalidate is _validate_with_unit
assert parameter.unit is None
assert parameter.equivalencies == []
assert parameter.format_spec == ".3g"
assert parameter.derived is False
assert parameter.name is None
# setting all kwargs
parameter = Parameter(fvalidate="float", doc="DOCSTRING",
unit="km", equivalencies=[u.mass_energy()],
fmt=".4f", derived=True)
assert parameter.fvalidate is _validate_to_float
assert parameter.unit is u.km
assert parameter.equivalencies == [u.mass_energy()]
assert parameter.format_spec == ".4f"
assert parameter.derived is True
def test_Parameter_instance_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
assert hasattr(all_parameter, "__doc__")
# Parameter
assert hasattr(all_parameter, "_unit")
assert hasattr(all_parameter, "_equivalencies")
assert hasattr(all_parameter, "_fmt")
assert hasattr(all_parameter, "_derived")
# __set_name__
assert hasattr(all_parameter, "_attr_name")
assert hasattr(all_parameter, "_attr_name_private")
def test_Parameter_fvalidate(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
def test_Parameter_name(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
assert hasattr(all_parameter, "name")
assert isinstance(all_parameter.name, str)
assert all_parameter.name is all_parameter._attr_name
def test_Parameter_unit(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
assert hasattr(all_parameter, "unit")
assert isinstance(all_parameter.unit, (u.UnitBase, type(None)))
assert all_parameter.unit is all_parameter._unit
def test_Parameter_equivalencies(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
assert hasattr(all_parameter, "equivalencies")
assert isinstance(all_parameter.equivalencies, (list, u.Equivalency))
assert all_parameter.equivalencies is all_parameter._equivalencies
def test_Parameter_format_spec(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
assert hasattr(all_parameter, "format_spec")
assert isinstance(all_parameter.format_spec, str)
assert all_parameter.format_spec is all_parameter._fmt
def test_Parameter_derived(self, cosmo_cls, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
assert hasattr(all_parameter, "derived")
assert isinstance(all_parameter.derived, bool)
assert all_parameter.derived is all_parameter._derived
# test value
if all_parameter.name in cosmo_cls.__parameters__:
assert all_parameter.derived is False
else:
assert all_parameter.derived is True
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__get__`."""
# from class
parameter = getattr(cosmo_cls, all_parameter.name)
assert isinstance(parameter, Parameter)
assert parameter is all_parameter
# from instance
parameter = getattr(cosmo, all_parameter.name)
assert np.all(parameter == getattr(cosmo, all_parameter._attr_name_private))
def test_Parameter_descriptor_set(self, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__set__`."""
# test it's already set
assert hasattr(cosmo, all_parameter._attr_name_private)
# and raises an error if set again
with pytest.raises(AttributeError, match="can't set attribute"):
setattr(cosmo, all_parameter._attr_name, None)
# -------------------------------------------
# validate value
# tested later.
# ===============================================================
# Usage Tests
def test_Parameter_listed(self, cosmo_cls, all_parameter):
"""Test each `astropy.cosmology.Parameter` attached to Cosmology."""
# just double check that each entry is a Parameter
assert isinstance(all_parameter, Parameter)
# the reverse: check that if it is a Parameter, it's listed.
# note have to check the more inclusive ``__all_parameters__``
assert all_parameter.name in cosmo_cls.__all_parameters__
if not all_parameter.derived:
assert all_parameter.name in cosmo_cls.__parameters__
def test_parameter_related_attributes_on_Cosmology(self, cosmo_cls):
"""Test `astropy.cosmology.Parameter`-related on Cosmology."""
# establish has expected attribute
assert hasattr(cosmo_cls, "__parameters__")
assert hasattr(cosmo_cls, "__all_parameters__")
def test_Parameter_not_unique(self, cosmo_cls, clean_registry):
"""Cosmology Parameter not unique to class when subclass defined."""
# define subclass to show param is same
class ExampleBase(cosmo_cls):
param = Parameter()
class Example(ExampleBase): pass
assert Example.param is ExampleBase.param
assert Example.__parameters__ == ExampleBase.__parameters__
def test_Parameters_reorder_by_signature(self, cosmo_cls, clean_registry):
"""Test parameters are reordered."""
class Example(cosmo_cls):
param = Parameter()
def __init__(self, param, *, name=None, meta=None):
pass # never actually initialized
# param should be 1st, all other parameters next
        assert Example.__parameters__[0] == "param"
# Check the other parameters are as expected.
# only run this test if "param" is not already on the cosmology
if cosmo_cls.__parameters__[0] != "param":
assert set(Example.__parameters__[1:]) == set(cosmo_cls.__parameters__)
def test_make_from_Parameter(self, cosmo_cls, clean_registry):
"""Test the parameter creation process. Uses ``__set__``."""
class Example(cosmo_cls):
param = Parameter(unit=u.eV, equivalencies=u.mass_energy())
def __init__(self, param, *, name=None, meta=None):
self.param = param
@property
def is_flat(self):
return super().is_flat()
assert Example(1).param == 1 * u.eV
assert Example(1 * u.eV).param == 1 * u.eV
assert Example(1 * u.J).param == (1 * u.J).to(u.eV)
assert Example(1 * u.kg).param == (1 * u.kg).to(u.eV, u.mass_energy())
# ========================================================================
class TestParameter(ParameterTestMixin):
"""
Test `astropy.cosmology.Parameter` directly. Adds a lot of specific tests
that wouldn't be covered by the per-cosmology tests.
"""
def setup_class(self):
class Example1(Cosmology):
param = Parameter(doc="Description of example parameter.",
unit=u.m, equivalencies=u.mass_energy())
def __init__(self, param=15):
self.param = param
@property
def is_flat(self):
return super().is_flat()
# with validator
class Example2(Example1):
def __init__(self, param=15 * u.m):
self.param = param
@Example1.param.validator
def param(self, param, value):
return value.to(u.km)
# attributes
self.classes = {"Example1": Example1, "Example2": Example2}
def teardown_class(self):
for cls in self.classes.values():
_COSMOLOGY_CLASSES.pop(cls.__qualname__)
@pytest.fixture(params=["Example1", "Example2"])
def cosmo_cls(self, request):
"""Cosmology class."""
return self.classes[request.param]
@pytest.fixture
def cosmo(self, cosmo_cls):
"""Cosmology instance"""
return cosmo_cls()
@pytest.fixture
def param(self, cosmo_cls):
"""Get Parameter 'param' from cosmology class."""
return cosmo_cls.param
# ==============================================================
def test_Parameter_instance_attributes(self, param):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
super().test_Parameter_instance_attributes(param)
# property
assert param.__doc__ == "Description of example parameter."
# custom from init
assert param._unit == u.m
assert param._equivalencies == u.mass_energy()
assert param._fmt == ".3g"
assert param._derived == False
# custom from set_name
assert param._attr_name == "param"
assert param._attr_name_private == "_param"
def test_Parameter_fvalidate(self, cosmo, param):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
super().test_Parameter_fvalidate(param)
value = param.fvalidate(cosmo, param, 1000 * u.m)
assert value == 1 * u.km
def test_Parameter_name(self, param):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
super().test_Parameter_name(param)
assert param.name == "param"
def test_Parameter_unit(self, param):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
super().test_Parameter_unit(param)
assert param.unit == u.m
def test_Parameter_equivalencies(self, param):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
super().test_Parameter_equivalencies(param)
assert param.equivalencies == u.mass_energy()
def test_Parameter_format_spec(self, param):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
super().test_Parameter_format_spec(param)
assert param.format_spec == ".3g"
def test_Parameter_derived(self, cosmo_cls, param):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
super().test_Parameter_derived(cosmo_cls, param)
assert param.derived is False
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.__get__`."""
super().test_Parameter_descriptor_get(cosmo_cls, cosmo, param)
# from instance
value = getattr(cosmo, param.name)
assert value == 15 * u.m
# -------------------------------------------
# validation
def test_Parameter_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.validator`."""
for k in Parameter._registry_validators:
newparam = param.validator(k)
assert newparam.fvalidate == newparam._registry_validators[k]
# error for non-registered str
with pytest.raises(ValueError, match="`fvalidate`, if str"):
Parameter(fvalidate="NOT REGISTERED")
# error if wrong type
with pytest.raises(TypeError, match="`fvalidate` must be a function or"):
Parameter(fvalidate=object())
def test_Parameter_validate(self, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.validate`."""
value = param.validate(cosmo, 1000 * u.m)
# whether has custom validator
if param.fvalidate is param._registry_validators["default"]:
assert value.unit == u.m
assert value.value == 1000
else:
assert value.unit == u.km
assert value.value == 1
def test_Parameter_register_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.register_validator`."""
# already registered
with pytest.raises(KeyError, match="validator 'default' already"):
param.__class__.register_validator("default", None)
# validator not None
try:
func = lambda x: x
validator = param.__class__.register_validator("newvalidator", func)
assert validator is func
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# used as decorator
try:
@param.__class__.register_validator("newvalidator")
def func(cosmology, param, value):
return value
assert param.__class__._registry_validators["newvalidator"] is func
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# -------------------------------------------
def test_Parameter_clone(self, param):
"""Test :meth:`astropy.cosmology.Parameter.clone`."""
# this implicitly relies on `__eq__` testing properly. Which is tested.
# basic test that nothing changes
assert param.clone() == param
assert param.clone() is not param # but it's not a 'singleton'
# passing kwargs will change stuff
newparam = param.clone(unit="km/(yr sr)")
assert newparam.unit == u.km / u.yr / u.sr
assert param.unit != u.km / u.yr / u.sr # original is unchanged
# expected failure for not-an-argument
with pytest.raises(TypeError):
param.clone(not_a_valid_parameter=True)
# -------------------------------------------
def test_Parameter_equality(self):
"""
Test Parameter equality.
Determined from the processed initialization args (including defaults).
"""
p1 = Parameter(unit="km / (s Mpc)")
p2 = Parameter(unit="km / (s Mpc)")
assert p1 == p2
# not equal parameters
p3 = Parameter(unit="km / s")
assert p3 != p1
# misc
assert p1 != 2 # show doesn't error
# -------------------------------------------
def test_Parameter_repr(self, cosmo_cls, param):
"""Test Parameter repr."""
r = repr(param)
assert "Parameter(" in r
for subs in ("derived=False", 'unit=Unit("m")', 'equivalencies=[(Unit("kg"), Unit("J")',
"fmt='.3g'", "doc='Description of example parameter.'"):
assert subs in r, subs
        # `fvalidate` is a little trickier b/c one of them is custom!
if param.fvalidate in param._registry_validators.values(): # not custom
assert "fvalidate='default'" in r
else:
assert "fvalidate=<" in r # Some function, don't care about details.
def test_Parameter_repr_roundtrip(self, param):
"""Test ``eval(repr(Parameter))`` can round trip to ``Parameter``."""
P = Parameter(doc="A description of this parameter.", derived=True)
NP = eval(repr(P)) # Evaluate string representation back into a param.
assert P == NP
# ==============================================================
def test_Parameter_doesnt_change_with_generic_class(self):
"""Descriptors are initialized once and not updated on subclasses."""
class ExampleBase:
def __init__(self, param=15):
self._param = param
sig = inspect.signature(__init__)
_init_signature = sig.replace(parameters=list(sig.parameters.values())[1:])
param = Parameter(doc="example parameter")
class Example(ExampleBase): pass
assert Example.param is ExampleBase.param
def test_Parameter_doesnt_change_with_cosmology(self, cosmo_cls):
"""Cosmology reinitializes all descriptors when a subclass is defined."""
# define subclass to show param is same
class Example(cosmo_cls): pass
assert Example.param is cosmo_cls.param
# unregister
_COSMOLOGY_CLASSES.pop(Example.__qualname__)
assert Example.__qualname__ not in _COSMOLOGY_CLASSES
|
|
"""
sentry.models.release
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import re
import six
from django.db import models, IntegrityError, transaction
from django.db.models import F
from django.utils import timezone
from jsonfield import JSONField
from sentry.app import locks
from sentry.db.models import (
ArrayField, BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.models import CommitFileChange
from sentry.constants import VERSION_LENGTH
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.retries import TimedRetryPolicy
logger = logging.getLogger(__name__)
_sha1_re = re.compile(r'^[a-f0-9]{40}$')
_dotted_path_prefix_re = re.compile(r'^([a-zA-Z][a-zA-Z0-9-]+)(\.[a-zA-Z][a-zA-Z0-9-]+)+-')
BAD_RELEASE_CHARS = '\n\f\t/'
class ReleaseProject(Model):
__core__ = False
project = FlexibleForeignKey('sentry.Project')
release = FlexibleForeignKey('sentry.Release')
new_groups = BoundedPositiveIntegerField(null=True, default=0)
class Meta:
app_label = 'sentry'
db_table = 'sentry_release_project'
unique_together = (('project', 'release'), )
class Release(Model):
"""
A release is generally created when a new version is pushed into a
production state.
"""
__core__ = False
organization = FlexibleForeignKey('sentry.Organization')
projects = models.ManyToManyField(
'sentry.Project', related_name='releases', through=ReleaseProject
)
# DEPRECATED
project_id = BoundedPositiveIntegerField(null=True)
version = models.CharField(max_length=VERSION_LENGTH)
# ref might be the branch name being released
ref = models.CharField(max_length=VERSION_LENGTH, null=True, blank=True)
url = models.URLField(null=True, blank=True)
date_added = models.DateTimeField(default=timezone.now)
# DEPRECATED - not available in UI or editable from API
date_started = models.DateTimeField(null=True, blank=True)
date_released = models.DateTimeField(null=True, blank=True)
# arbitrary data recorded with the release
data = JSONField(default={})
new_groups = BoundedPositiveIntegerField(default=0)
# generally the release manager, or the person initiating the process
owner = FlexibleForeignKey('sentry.User', null=True, blank=True)
# materialized stats
commit_count = BoundedPositiveIntegerField(null=True)
last_commit_id = BoundedPositiveIntegerField(null=True)
authors = ArrayField(null=True)
total_deploys = BoundedPositiveIntegerField(null=True)
last_deploy_id = BoundedPositiveIntegerField(null=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_release'
unique_together = (('organization', 'version'), )
__repr__ = sane_repr('organization', 'version')
@staticmethod
def is_valid_version(value):
return not (any(c in value for c in BAD_RELEASE_CHARS)
or value in ('.', '..') or not value)
@classmethod
def get_cache_key(cls, organization_id, version):
return 'release:3:%s:%s' % (organization_id, md5_text(version).hexdigest())
@classmethod
def get_lock_key(cls, organization_id, release_id):
return 'releasecommits:{}:{}'.format(organization_id, release_id)
@classmethod
def get(cls, project, version):
cache_key = cls.get_cache_key(project.organization_id, version)
release = cache.get(cache_key)
if release is None:
try:
release = cls.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except cls.DoesNotExist:
release = -1
cache.set(cache_key, release, 300)
if release == -1:
return
return release
@classmethod
def get_or_create(cls, project, version, date_added=None):
from sentry.models import Project
if date_added is None:
date_added = timezone.now()
cache_key = cls.get_cache_key(project.organization_id, version)
release = cache.get(cache_key)
if release in (None, -1):
# TODO(dcramer): if the cache result is -1 we could attempt a
# default create here instead of default get
project_version = ('%s-%s' % (project.slug, version))[:VERSION_LENGTH]
releases = list(
cls.objects.filter(
organization_id=project.organization_id,
version__in=[version, project_version],
projects=project
)
)
if releases:
try:
release = [r for r in releases if r.version == project_version][0]
except IndexError:
release = releases[0]
else:
try:
with transaction.atomic():
release = cls.objects.create(
organization_id=project.organization_id,
version=version,
date_added=date_added,
total_deploys=0,
)
except IntegrityError:
release = cls.objects.get(
organization_id=project.organization_id, version=version
)
release.add_project(project)
if not project.flags.has_releases:
project.flags.has_releases = True
project.update(flags=F('flags').bitor(Project.flags.has_releases))
# TODO(dcramer): upon creating a new release, check if it should be
# the new "latest release" for this project
cache.set(cache_key, release, 3600)
return release
@classmethod
def merge(cls, to_release, from_releases):
# The following models reference release:
# ReleaseCommit.release
# ReleaseEnvironment.release_id
# ReleaseProject.release
# GroupRelease.release_id
# GroupResolution.release
# Group.first_release
# ReleaseFile.release
from sentry.models import (
ReleaseCommit, ReleaseEnvironment, ReleaseFile, ReleaseProject, Group, GroupRelease,
GroupResolution
)
model_list = (
ReleaseCommit, ReleaseEnvironment, ReleaseFile, ReleaseProject, GroupRelease,
GroupResolution
)
for release in from_releases:
for model in model_list:
if hasattr(model, 'release'):
update_kwargs = {'release': to_release}
else:
update_kwargs = {'release_id': to_release.id}
try:
with transaction.atomic():
model.objects.filter(release_id=release.id).update(**update_kwargs)
except IntegrityError:
for item in model.objects.filter(release_id=release.id):
try:
with transaction.atomic():
model.objects.filter(id=item.id).update(**update_kwargs)
except IntegrityError:
item.delete()
Group.objects.filter(first_release=release).update(first_release=to_release)
release.delete()
@classmethod
def get_closest_releases(cls, project, start_version, limit=5):
# given a release version + project, return next
# `limit` releases (includes the release specified by `version`)
try:
release_dates = cls.objects.filter(
organization_id=project.organization_id,
version=start_version,
projects=project,
).values('date_released', 'date_added').get()
except cls.DoesNotExist:
return []
start_date = release_dates['date_released'] or release_dates['date_added']
return list(Release.objects.filter(
projects=project,
organization_id=project.organization_id,
).extra(select={
'date': 'COALESCE(date_released, date_added)',
}
).extra(
where=["COALESCE(date_released, date_added) >= %s"],
params=[start_date]
).extra(
order_by=['date']
)[:limit])
@property
def short_version(self):
return Release.get_display_version(self.version)
@staticmethod
def get_display_version(version):
match = _dotted_path_prefix_re.match(version)
if match is not None:
version = version[match.end():]
if _sha1_re.match(version):
return version[:7]
return version
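    # Illustrative examples (assumed inputs, not from the original source):
    # a dotted bundle-id style prefix is stripped, and a bare 40-character
    # SHA-1 is shortened to its first 7 characters.
    #   Release.get_display_version('com.example.app-1.2.3')
    #       -> '1.2.3'
    #   Release.get_display_version('da39a3ee5e6b4b0d3255bfef95601890afd80709')
    #       -> 'da39a3e'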
def add_dist(self, name, date_added=None):
from sentry.models import Distribution
if date_added is None:
date_added = timezone.now()
return Distribution.objects.get_or_create(
release=self,
name=name,
defaults={
'date_added': date_added,
'organization_id': self.organization_id,
}
)[0]
def get_dist(self, name):
from sentry.models import Distribution
try:
return Distribution.objects.get(name=name, release=self)
except Distribution.DoesNotExist:
pass
def add_project(self, project):
"""
Add a project to this release.
Returns True if the project was added and did not already exist.
"""
from sentry.models import Project
try:
with transaction.atomic():
ReleaseProject.objects.create(project=project, release=self)
if not project.flags.has_releases:
project.flags.has_releases = True
project.update(
flags=F('flags').bitor(Project.flags.has_releases),
)
except IntegrityError:
return False
else:
return True
def set_refs(self, refs, user, fetch=False):
from sentry.api.exceptions import InvalidRepository
from sentry.models import Commit, ReleaseHeadCommit, Repository
from sentry.tasks.commits import fetch_commits
# TODO: this does the wrong thing unless you are on the most
# recent release. Add a timestamp compare?
prev_release = type(self).objects.filter(
organization_id=self.organization_id,
projects__in=self.projects.all(),
).extra(select={
'sort': 'COALESCE(date_released, date_added)',
}).exclude(version=self.version).order_by('-sort').first()
names = {r['repository'] for r in refs}
repos = list(
Repository.objects.filter(
organization_id=self.organization_id,
name__in=names,
)
)
repos_by_name = {r.name: r for r in repos}
invalid_repos = names - set(repos_by_name.keys())
if invalid_repos:
raise InvalidRepository('Invalid repository names: %s' % ','.join(invalid_repos))
for ref in refs:
repo = repos_by_name[ref['repository']]
commit = Commit.objects.get_or_create(
organization_id=self.organization_id,
repository_id=repo.id,
key=ref['commit'],
)[0]
# update head commit for repo/release if exists
ReleaseHeadCommit.objects.create_or_update(
organization_id=self.organization_id,
repository_id=repo.id,
release=self,
values={
'commit': commit,
}
)
if fetch:
fetch_commits.apply_async(
kwargs={
'release_id': self.id,
'user_id': user.id,
'refs': refs,
'prev_release_id': prev_release and prev_release.id,
}
)
def set_commits(self, commit_list):
"""
Bind a list of commits to this release.
These should be ordered from newest to oldest.
This will clear any existing commit log and replace it with the given
commits.
"""
from sentry.models import (
Commit, CommitAuthor, Group, GroupLink, GroupResolution, GroupStatus,
ReleaseCommit, Repository
)
from sentry.plugins.providers.repository import RepositoryProvider
commit_list = [
c for c in commit_list
if not RepositoryProvider.should_ignore_commit(c.get('message', ''))
]
lock_key = type(self).get_lock_key(self.organization_id, self.id)
lock = locks.get(lock_key, duration=10)
with TimedRetryPolicy(10)(lock.acquire):
with transaction.atomic():
# TODO(dcramer): would be good to optimize the logic to avoid these
# deletes but not overly important
ReleaseCommit.objects.filter(
release=self,
).delete()
authors = {}
repos = {}
commit_author_by_commit = {}
latest_commit = None
for idx, data in enumerate(commit_list):
                    repo_name = (
                        data.get('repository')
                        or 'organization-{}'.format(self.organization_id)
                    )
if repo_name not in repos:
repos[repo_name] = repo = Repository.objects.get_or_create(
organization_id=self.organization_id,
name=repo_name,
)[0]
else:
repo = repos[repo_name]
author_email = data.get('author_email')
if author_email is None and data.get('author_name'):
author_email = (
re.sub(r'[^a-zA-Z0-9\-_\.]*', '', data['author_name']).lower() +
'@localhost'
)
if not author_email:
author = None
elif author_email not in authors:
authors[author_email] = author = CommitAuthor.objects.get_or_create(
organization_id=self.organization_id,
email=author_email,
defaults={
'name': data.get('author_name'),
}
)[0]
if data.get('author_name') and author.name != data['author_name']:
author.update(name=data['author_name'])
else:
author = authors[author_email]
defaults = {
'message': data.get('message'),
'author': author,
'date_added': data.get('timestamp') or timezone.now(),
}
commit, created = Commit.objects.get_or_create(
organization_id=self.organization_id,
repository_id=repo.id,
key=data['id'],
defaults=defaults,
)
if author is None:
author = commit.author
commit_author_by_commit[commit.id] = author
patch_set = data.get('patch_set', [])
for patched_file in patch_set:
CommitFileChange.objects.get_or_create(
organization_id=self.organization.id,
commit=commit,
filename=patched_file['path'],
type=patched_file['type'],
)
if not created:
update_kwargs = {}
if commit.message is None and defaults['message'] is not None:
update_kwargs['message'] = defaults['message']
if commit.author_id is None and defaults['author'] is not None:
update_kwargs['author'] = defaults['author']
if update_kwargs:
commit.update(**update_kwargs)
ReleaseCommit.objects.create(
organization_id=self.organization_id,
release=self,
commit=commit,
order=idx,
)
if latest_commit is None:
latest_commit = commit
self.update(
commit_count=len(commit_list),
authors=[
six.text_type(a_id)
for a_id in ReleaseCommit.objects.filter(
release=self,
commit__author_id__isnull=False,
).values_list('commit__author_id', flat=True).distinct()
],
last_commit_id=latest_commit.id if latest_commit else None,
)
commit_resolutions = list(
GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.commit,
linked_id__in=ReleaseCommit.objects.filter(release=self)
.values_list('commit_id', flat=True),
).values_list('group_id', 'linked_id')
)
user_by_author = {None: None}
for group_id, linked_id in commit_resolutions:
author = commit_author_by_commit.get(linked_id)
if author not in user_by_author:
try:
user_by_author[author] = author.find_users()[0]
except IndexError:
user_by_author[author] = None
actor = user_by_author[author]
with transaction.atomic():
GroupResolution.objects.create_or_update(
group_id=group_id,
values={
'release': self,
'type': GroupResolution.Type.in_release,
'status': GroupResolution.Status.resolved,
'actor_id': actor.id if actor else None,
},
)
Group.objects.filter(
id=group_id,
).update(status=GroupStatus.RESOLVED)
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the size of each given file and optionally computes the size of
libchrome.so without the dependencies added for building with android NDK.
Also breaks down the contents of the APK to determine the installed size
and assign size contributions to different classes of file.
"""
import collections
import json
import operator
import optparse
import os
import re
import sys
import tempfile
import zipfile
import zlib
import devil_chromium
from devil.utils import cmd_helper
from pylib.constants import host_paths
_GRIT_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT, 'tools', 'grit')
with host_paths.SysPath(_GRIT_PATH):
from grit.format import data_pack # pylint: disable=import-error
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
# Static initializers expected in official builds. Note that this list is built
# using 'nm' on libchrome.so which results from a GCC official build (i.e.
# Clang is not supported currently).
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'APK resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_DUMP_STATIC_INITIALIZERS_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'tools', 'linux', 'dump-static-initializers.py')
_RC_HEADER_RE = re.compile(r'^#define (?P<name>\w+) (?P<id>\d+)$')
def CountStaticInitializers(so_path):
def get_elf_section_size(readelf_stdout, section_name):
# Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8
match = re.search(r'\.%s.*$' % re.escape(section_name),
readelf_stdout, re.MULTILINE)
if not match:
return (False, -1)
size_str = re.split(r'\W+', match.group(0))[5]
return (True, int(size_str, 16))
# Find the number of files with at least one static initializer.
# First determine if we're 32 or 64 bit
stdout = cmd_helper.GetCmdOutput(['readelf', '-h', so_path])
elf_class_line = re.search('Class:.*$', stdout, re.MULTILINE).group(0)
elf_class = re.split(r'\W+', elf_class_line)[1]
if elf_class == 'ELF32':
word_size = 4
else:
word_size = 8
# Then find the number of files with global static initializers.
# NOTE: this is very implementation-specific and makes assumptions
# about how compiler and linker implement global static initializers.
si_count = 0
stdout = cmd_helper.GetCmdOutput(['readelf', '-SW', so_path])
has_init_array, init_array_size = get_elf_section_size(stdout, 'init_array')
if has_init_array:
si_count = init_array_size / word_size
si_count = max(si_count, 0)
return si_count
def GetStaticInitializers(so_path):
output = cmd_helper.GetCmdOutput([_DUMP_STATIC_INITIALIZERS_PATH, '-d',
so_path])
return output.splitlines()
def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
improvement_direction='down', important=True):
"""Outputs test results in correct format.
If chart_data is None, it outputs data in old format. If chart_data is a
dictionary, formats in chartjson format. If any other format defaults to
old format.
"""
if chart_data and isinstance(chart_data, dict):
chart_data['charts'].setdefault(graph_title, {})
chart_data['charts'][graph_title][trace_title] = {
'type': 'scalar',
'value': value,
'units': units,
'improvement_direction': improvement_direction,
'important': important
}
else:
perf_tests_results_helper.PrintPerfResult(
graph_title, trace_title, [value], units)
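# Illustrative sketch (an assumption, not part of the original script) of the
# chartjson structure produced by ReportPerfResult when chart_data is a dict:
#   {
#     'charts': {
#       'ResourceSizes': {
#         'base.apk size': {
#           'type': 'scalar',
#           'value': 1234,
#           'units': 'bytes',
#           'improvement_direction': 'down',
#           'important': True
#         }
#       }
#     }
#   }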
def PrintResourceSizes(files, chartjson=None):
"""Prints the sizes of each given file.
Args:
files: List of files to print sizes for.
"""
for f in files:
ReportPerfResult(chartjson, 'ResourceSizes', os.path.basename(f) + ' size',
os.path.getsize(f), 'bytes')
def PrintApkAnalysis(apk_filename, chartjson=None):
"""Analyse APK to determine size contributions of different file classes."""
# Define a named tuple type for file grouping.
# name: Human readable name for this file group
# regex: Regular expression to match filename
# extracted: Function that takes a file name and returns whether the file is
# extracted from the apk at install/runtime.
FileGroup = collections.namedtuple('FileGroup',
['name', 'regex', 'extracted'])
# File groups are checked in sequence, so more specific regexes should be
# earlier in the list.
YES = lambda _: True
NO = lambda _: False
FILE_GROUPS = (
FileGroup('Native code', r'\.so$', lambda f: 'crazy' not in f),
FileGroup('Java code', r'\.dex$', YES),
FileGroup('Native resources (no l10n)', r'\.pak$', NO),
# For locale paks, assume only english paks are extracted.
FileGroup('Native resources (l10n)', r'\.lpak$', lambda f: 'en_' in f),
FileGroup('ICU (i18n library) data', r'assets/icudtl\.dat$', NO),
FileGroup('V8 Snapshots', r'\.bin$', NO),
FileGroup('PNG drawables', r'\.png$', NO),
FileGroup('Non-compiled Android resources', r'^res/', NO),
FileGroup('Compiled Android resources', r'\.arsc$', NO),
FileGroup('Package metadata', r'^(META-INF/|AndroidManifest\.xml$)', NO),
FileGroup('Unknown files', r'.', NO),
)
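  # For example (illustrative file names, not from the original source):
  # 'lib/armeabi-v7a/libchrome.so' matches the 'Native code' group and, since
  # its name does not contain 'crazy', its uncompressed size also counts
  # toward the install size; 'assets/icudtl.dat' matches the ICU group and is
  # not extracted, so only its compressed size counts.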
apk = zipfile.ZipFile(apk_filename, 'r')
try:
apk_contents = apk.infolist()
finally:
apk.close()
total_apk_size = os.path.getsize(apk_filename)
apk_basename = os.path.basename(apk_filename)
found_files = {}
for group in FILE_GROUPS:
found_files[group] = []
for member in apk_contents:
for group in FILE_GROUPS:
if re.search(group.regex, member.filename):
found_files[group].append(member)
break
else:
raise KeyError('No group found for file "%s"' % member.filename)
total_install_size = total_apk_size
for group in FILE_GROUPS:
apk_size = sum(member.compress_size for member in found_files[group])
install_size = apk_size
install_bytes = sum(f.file_size for f in found_files[group]
if group.extracted(f.filename))
install_size += install_bytes
total_install_size += install_bytes
ReportPerfResult(chartjson, apk_basename + '_Breakdown',
group.name + ' size', apk_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_InstallBreakdown',
group.name + ' size', install_size, 'bytes')
transfer_size = _CalculateCompressedSize(apk_filename)
ReportPerfResult(chartjson, apk_basename + '_InstallSize',
'Estimated installed size', total_install_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_InstallSize', 'APK size',
total_apk_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_TransferSize',
'Transfer size (deflate)', transfer_size, 'bytes')
def IsPakFileName(file_name):
"""Returns whether the given file name ends with .pak or .lpak."""
return file_name.endswith('.pak') or file_name.endswith('.lpak')
def PrintPakAnalysis(apk_filename, min_pak_resource_size, build_type):
"""Print sizes of all resources in all pak files in |apk_filename|."""
print
print 'Analyzing pak files in %s...' % apk_filename
# A structure for holding details about a pak file.
Pak = collections.namedtuple(
'Pak', ['filename', 'compress_size', 'file_size', 'resources'])
  # Build a list of Pak objects for each pak file.
paks = []
apk = zipfile.ZipFile(apk_filename, 'r')
try:
for i in (x for x in apk.infolist() if IsPakFileName(x.filename)):
with tempfile.NamedTemporaryFile() as f:
f.write(apk.read(i.filename))
f.flush()
paks.append(Pak(i.filename, i.compress_size, i.file_size,
data_pack.DataPack.ReadDataPack(f.name).resources))
finally:
apk.close()
# Output the overall pak file summary.
total_files = len(paks)
total_compress_size = sum(pak.compress_size for pak in paks)
total_file_size = sum(pak.file_size for pak in paks)
print 'Total pak files: %d' % total_files
print 'Total compressed size: %s' % _FormatBytes(total_compress_size)
print 'Total uncompressed size: %s' % _FormatBytes(total_file_size)
print
# Output the table of details about all pak files.
print '%25s%11s%21s%21s' % (
'FILENAME', 'RESOURCES', 'COMPRESSED SIZE', 'UNCOMPRESSED SIZE')
for pak in sorted(paks, key=operator.attrgetter('file_size'), reverse=True):
print '%25s %10s %12s %6.2f%% %12s %6.2f%%' % (
pak.filename,
len(pak.resources),
_FormatBytes(pak.compress_size),
100.0 * pak.compress_size / total_compress_size,
_FormatBytes(pak.file_size),
100.0 * pak.file_size / total_file_size)
print
print 'Analyzing pak resources in %s...' % apk_filename
# Calculate aggregate stats about resources across pak files.
resource_count_map = collections.defaultdict(int)
resource_size_map = collections.defaultdict(int)
resource_overhead_bytes = 6
for pak in paks:
for r in pak.resources:
resource_count_map[r] += 1
resource_size_map[r] += len(pak.resources[r]) + resource_overhead_bytes
# Output the overall resource summary.
total_resource_size = sum(resource_size_map.values())
total_resource_count = len(resource_count_map)
assert total_resource_size <= total_file_size
print 'Total pak resources: %s' % total_resource_count
print 'Total uncompressed resource size: %s' % _FormatBytes(
total_resource_size)
print
resource_id_name_map = _GetResourceIdNameMap(build_type)
# Output the table of details about all resources across pak files.
print
print '%56s %5s %17s' % ('RESOURCE', 'COUNT', 'UNCOMPRESSED SIZE')
for i in sorted(resource_size_map, key=resource_size_map.get,
reverse=True):
if resource_size_map[i] >= min_pak_resource_size:
print '%56s %5s %9s %6.2f%%' % (
resource_id_name_map.get(i, i),
resource_count_map[i],
_FormatBytes(resource_size_map[i]),
100.0 * resource_size_map[i] / total_resource_size)
def _GetResourceIdNameMap(build_type):
"""Returns a map of {resource_id: resource_name}."""
out_dir = os.path.join(host_paths.DIR_SOURCE_ROOT, 'out', build_type)
assert os.path.isdir(out_dir), 'Failed to locate out dir at %s' % out_dir
print 'Looking at resources in: %s' % out_dir
grit_headers = []
for root, _, files in os.walk(out_dir):
if root.endswith('grit'):
grit_headers += [os.path.join(root, f) for f in files if f.endswith('.h')]
assert grit_headers, 'Failed to find grit headers in %s' % out_dir
id_name_map = {}
for header in grit_headers:
with open(header, 'r') as f:
for line in f.readlines():
m = _RC_HEADER_RE.match(line.strip())
if m:
i = int(m.group('id'))
name = m.group('name')
if i in id_name_map and name != id_name_map[i]:
print 'WARNING: Resource ID conflict %s (%s vs %s)' % (
i, id_name_map[i], name)
id_name_map[i] = name
return id_name_map
def PrintStaticInitializersCount(so_with_symbols_path, chartjson=None):
"""Emits the performance result for static initializers found in the provided
shared library. Additionally, files for which static initializers were
found are printed on the standard output.
Args:
so_with_symbols_path: Path to the unstripped libchrome.so file.
"""
  # GetStaticInitializers uses dump-static-initializers.py to get a list of all
# static initializers. This does not work on all archs (particularly arm).
# TODO(rnephew): Get rid of warning when crbug.com/585588 is fixed.
si_count = CountStaticInitializers(so_with_symbols_path)
static_initializers = GetStaticInitializers(so_with_symbols_path)
if si_count != len(static_initializers):
print ('There are %d files with static initializers, but '
'dump-static-initializers found %d:' %
(si_count, len(static_initializers)))
else:
print 'Found %d files with static initializers:' % si_count
print '\n'.join(static_initializers)
ReportPerfResult(chartjson, 'StaticInitializersCount', 'count',
si_count, 'count')
def _FormatBytes(byts):
"""Pretty-print a number of bytes."""
if byts > 2**20.0:
byts /= 2**20.0
return '%.2fm' % byts
if byts > 2**10.0:
byts /= 2**10.0
return '%.2fk' % byts
return str(byts)
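# Illustrative outputs (not part of the original script):
#   _FormatBytes(3 * 2**20) -> '3.00m'
#   _FormatBytes(1536)      -> '1.50k'
#   _FormatBytes(512)       -> '512'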
def _CalculateCompressedSize(file_path):
CHUNK_SIZE = 256 * 1024
compressor = zlib.compressobj()
total_size = 0
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(CHUNK_SIZE), ''):
total_size += len(compressor.compress(chunk))
total_size += len(compressor.flush())
return total_size
def main(argv):
usage = """Usage: %prog [options] file1 file2 ...
Pass any number of files to graph their sizes. Any files with the extension
'.apk' will be broken down into their components on a separate graph."""
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option('--so-path', help='Path to libchrome.so.')
option_parser.add_option('--so-with-symbols-path',
help='Path to libchrome.so with symbols.')
option_parser.add_option('--min-pak-resource-size', type='int',
default=20*1024,
help='Minimum byte size of displayed pak resources.')
option_parser.add_option('--build_type', dest='build_type', default='Debug',
help='Sets the build type, default is Debug.')
option_parser.add_option('--chartjson', action="store_true",
help='Sets output mode to chartjson.')
option_parser.add_option('--output-dir', default='.',
help='Directory to save chartjson to.')
option_parser.add_option('-d', '--device',
help='Dummy option for perf runner.')
options, args = option_parser.parse_args(argv)
files = args[1:]
chartjson = _BASE_CHART.copy() if options.chartjson else None
  # For backward compatibility with buildbot scripts, treat --so-path as just
# another file to print the size of. We don't need it for anything special any
# more.
if options.so_path:
files.append(options.so_path)
if not files:
option_parser.error('Must specify a file')
devil_chromium.Initialize()
if options.so_with_symbols_path:
PrintStaticInitializersCount(
options.so_with_symbols_path, chartjson=chartjson)
PrintResourceSizes(files, chartjson=chartjson)
for f in files:
if f.endswith('.apk'):
PrintApkAnalysis(f, chartjson=chartjson)
PrintPakAnalysis(f, options.min_pak_resource_size, options.build_type)
if chartjson:
results_path = os.path.join(options.output_dir, 'results-chart.json')
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
from common_fixtures import * # NOQA
from cattle import ApiError
class Context:
def __init__(self, ctx, driver):
self.new_context = ctx
self.driver = driver
@pytest.fixture(scope='function')
def storage_driver_context(new_context, super_client):
client = new_context.client
stack = client.create_stack(name=random_str(),
startOnCreate=True)
super_client.update(stack, system=True)
stack = client.wait_success(stack)
assert stack.state == 'active'
s = client.create_storage_driver_service(
name=random_str(),
startOnCreate=True,
stackId=stack.id,
storageDriver={
'foo': 'bar'
})
s = client.wait_success(s)
assert s.state == 'active'
assert s.kind == 'storageDriverService'
wait_for(lambda: len(s.storageDrivers()) == 1)
driver = find_one(s.storageDrivers)
return Context(new_context, driver)
def test_storage_driver_in_use(new_context, super_client):
client = new_context.client
stack = client.create_stack(name=random_str(),
startOnCreate=True)
super_client.update(stack, system=True)
stack = client.wait_success(stack)
assert stack.state == 'active'
s = client.create_storage_driver_service(
name=random_str(),
startOnCreate=True,
stackId=stack.id,
storageDriver={
'foo': 'bar'
})
s = client.wait_success(s)
assert s.state == 'active'
driver = find_one(s.storageDrivers)
vol_name = random_str()
c = new_context.create_container(dataVolumes=[
'{}:/tmp'.format(vol_name)
],
volumeDriver=driver.name)
c = client.wait_success(c)
assert c.state == 'running'
vol = find_one(client.list_volume, name=vol_name)
assert find_one(vol.storagePools).storageDriverId == driver.id
with pytest.raises(ApiError):
s.deactivate()
with pytest.raises(ApiError):
client.delete(s)
with pytest.raises(ApiError):
stack.deactivateservices()
with pytest.raises(ApiError):
client.delete(stack)
def test_create_storage_driver_create_local(new_context, super_client):
client = new_context.client
driver_name = 'test' + random_str()
stack = client.create_stack(name=random_str())
super_client.update(stack, system=True)
s = client.create_storage_driver_service(
name=random_str(),
stackId=stack.id,
storageDriver={
'name': driver_name,
'scope': 'local',
})
s = client.wait_success(s)
assert s.state == 'inactive'
sds = client.list_storage_driver(serviceId=s.id,
name=driver_name)
assert len(sds) == 1
s = client.wait_success(s.activate())
assert s.state == 'active'
sd = find_one(client.list_storage_driver, serviceId=s.id, name=driver_name)
sd = client.wait_success(sd)
find_one(s.storageDrivers)
assert sd.state == 'active'
assert sd.kind == 'storageDriver'
assert sd.serviceId == s.id
assert sd.scope == 'local'
def test_create_storage_driver_create_delete(new_context, super_client):
client = new_context.client
host = new_context.host
assert len(host.storagePools()) == 1
driver_name = 'test' + random_str()
stack = client.create_stack(name=random_str())
super_client.update(stack, system=True)
s = client.create_storage_driver_service(
name=random_str(),
stackId=stack.id,
storageDriver={
'name': driver_name,
'volumeAccessMode': 'singleHostRW',
'blockDevicePath': 'some path',
'volumeCapabilities': [
'superAwesome',
],
})
s = client.wait_success(s)
assert s.state == 'inactive'
sds = client.list_storage_driver(serviceId=s.id,
name=driver_name)
assert len(sds) == 1
s = client.wait_success(s.activate())
assert s.state == 'active'
sd = find_one(client.list_storage_driver, serviceId=s.id, name=driver_name)
sd = client.wait_success(sd)
find_one(s.storageDrivers)
assert sd.state == 'active'
assert sd.kind == 'storageDriver'
assert sd.serviceId == s.id
assert sd.scope == 'environment'
assert sd.volumeAccessMode == 'singleHostRW'
assert sd.volumeCapabilities == ['superAwesome']
pools = [x for x in host.storagePools() if x.storageDriverId == sd.id]
assert len(host.storagePools()) == 2
assert len(pools) == 1
stack = client.wait_success(stack.remove())
assert stack.state == 'removed'
s = client.wait_success(s)
assert s.state == 'removed'
sd = client.wait_success(sd)
assert sd.state == 'removed'
def test_volume_create_from_driver(storage_driver_context):
client = storage_driver_context.new_context.client
host = storage_driver_context.new_context.host
driver = storage_driver_context.driver
volume = client.create_volume(name=random_str(),
driver=driver.name,
hostId=host.id,
driverOpts={'created': 'true'})
volume = client.wait_success(volume)
assert volume.state == 'detached'
assert volume.storageDriverId == driver.id
assert volume.driver == driver.name
def test_volume_create_from_driver2(storage_driver_context, super_client):
client = storage_driver_context.new_context.client
host = storage_driver_context.new_context.host
driver = storage_driver_context.driver
volume = client.create_volume(name=random_str(),
storageDriverId=driver.id,
hostId=host.id,
driverOpts={'created': 'true'})
volume = client.wait_success(volume)
assert volume.state == 'detached'
assert volume.storageDriverId == driver.id
assert volume.driver == driver.name
volume = super_client.reload(volume)
assert len(volume.storagePools()) == 1
volume = client.wait_success(volume.remove())
assert volume.removed is not None
def test_volume_create_from_user(storage_driver_context):
client = storage_driver_context.new_context.client
host = storage_driver_context.new_context.host
driver = storage_driver_context.driver
volume = client.create_volume(name=random_str(),
storageDriverId=driver.id)
volume = client.wait_success(volume)
assert volume.state == 'inactive'
assert volume.storageDriverId == driver.id
assert volume.driver == driver.name
volume = client.update(volume, hostId=host.id,
driverOpts={'created': 'true'})
volume = client.wait_success(volume)
assert volume.state == 'detached'
assert volume.hostId == host.id
def test_rm_sp_after_host_remove_if_local(new_context, super_client):
client = new_context.client
host = new_context.host
assert len(host.storagePools()) == 1
driver_name = 'test' + random_str()
stack = client.create_stack(name=random_str())
super_client.update(stack, system=True)
s = client.create_storage_driver_service(
name=random_str(),
stackId=stack.id,
storageDriver={
'name': driver_name,
'scope': 'local',
'volumeAccessMode': 'singleHostRW',
'blockDevicePath': 'some path',
'volumeCapabilities': [
'superAwesome',
],
})
s = client.wait_success(s)
assert s.state == 'inactive'
sds = client.list_storage_driver(serviceId=s.id,
name=driver_name)
assert len(sds) == 1
s = client.wait_success(s.activate())
assert s.state == 'active'
sd = find_one(client.list_storage_driver, serviceId=s.id, name=driver_name)
sd = client.wait_success(sd)
sps = host.storagePools()
for sp in sps:
if sp.storageDriverId == sd.id:
host = client.wait_success(host.deactivate())
client.wait_success(host.remove())
sp = client.wait_success(sp)
assert sp.state == 'removed'
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
from __future__ import print_function
import email
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
with open(mailmap, 'r') as fp:
for l in fp:
try:
canonical_email, alias = re.match(
r'[^#]*?(<.+>).*(<.+>).*', l).groups()
except AttributeError:
continue
mapping[alias] = canonical_email
return mapping
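# A minimal sketch (illustrative mailmap entry, not from the original source):
# given a .mailmap line such as
#     Jane Doe <jane@example.com> Jane <jdoe@localhost>
# parse_mailmap() returns {'<jdoe@localhost>': '<jane@example.com>'}, i.e. the
# alias address maps to the canonical one.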
def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
mailmap = os.path.join(os.path.dirname(git_dir), mailmap)
return parse_mailmap(mailmap)
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email_address in mapping.iteritems():
changelog = changelog.replace(alias, email_address)
return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
with open(requirements_file, 'r') as fil:
return fil.read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def _run_shell_command(cmd, throw_on_error=False):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = output.communicate()
if output.returncode and throw_on_error:
        raise Exception("%s returned %d" % (cmd, output.returncode))
if not out:
return None
return out[0].strip() or None
def _get_git_directory():
parent_dir = os.path.dirname(__file__)
while True:
git_dir = os.path.join(parent_dir, '.git')
if os.path.exists(git_dir):
return git_dir
parent_dir, child = os.path.split(parent_dir)
        if not child:  # reached the root directory
return None
def write_git_changelog():
"""Write a changelog based on the git changelog."""
new_changelog = 'ChangeLog'
git_dir = _get_git_directory()
if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
if git_dir:
git_log_cmd = 'git --git-dir=%s log' % git_dir
changelog = _run_shell_command(git_log_cmd)
mailmap = _parse_git_mailmap(git_dir)
with open(new_changelog, "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
else:
open(new_changelog, 'w').close()
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = ('(jenkins@review.(openstack|stackforge).org'
'|openstack-infra@lists.openstack.org)')
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
git_dir = _get_git_directory()
if not os.getenv('SKIP_GENERATE_AUTHORS'):
if git_dir:
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git --git-dir=" + git_dir +
" log --format='%aN <%aE>' | sort -u | "
"egrep -v '" + jenkins_email + "'")
changelog = _run_shell_command(git_log_cmd)
signed_cmd = ("git --git-dir=" + git_dir +
" log | grep -i Co-authored-by: | sort -u")
signed_entries = _run_shell_command(signed_cmd)
if signed_entries:
new_entries = [signed.split(":", 1)[1].strip()
for signed in signed_entries.split("\n") if signed]
for new_entry in new_entries:
if new_entry not in changelog:
changelog += "\n" + new_entry
changelog += "\n"
mailmap = _parse_git_mailmap(git_dir)
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
else:
open(new_authors, 'w').close()
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
builders = ['html', 'man']
def generate_autoindex(self):
print("**Autodocumenting from %s" % os.path.abspath(os.curdir))
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print("Generating %s" % output_filename)
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in self.builders:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
class LocalBuildLatex(LocalBuildDoc):
builders = ['latex']
cmdclass['build_sphinx'] = LocalBuildDoc
cmdclass['build_sphinx_latex'] = LocalBuildLatex
except ImportError:
pass
return cmdclass
def _get_revno(git_dir):
"""Return the number of commits since the most recent tag.
We use git-describe to find this out, but if there are no
tags then we fall back to counting commits since the beginning
of time.
"""
describe = _run_shell_command(
"git --git-dir=%s describe --always" % git_dir)
if "-" in describe:
return describe.rsplit("-", 2)[-2]
# no tags found
revlist = _run_shell_command(
"git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
return len(revlist.splitlines())
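# Illustrative example (assumed describe output, not from the original code):
# if 'git describe --always' prints '1.2.3-42-gdeadbee', the '42' component
# (commits since the 1.2.3 tag) is returned; with no tags, the total commit
# count from 'git rev-list' is used instead.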
def _get_version_from_git(pre_version):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
git_dir = _get_git_directory()
if git_dir:
if pre_version:
try:
return _run_shell_command(
"git --git-dir=" + git_dir + " describe --exact-match",
throw_on_error=True).replace('-', '.')
except Exception:
sha = _run_shell_command(
"git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
else:
return _run_shell_command(
"git --git-dir=" + git_dir + " describe --always").replace(
'-', '.')
return None
def _get_version_from_pkg_info(package_name):
"""Get the version from PKG-INFO file if we can."""
try:
pkg_info_file = open('PKG-INFO', 'r')
except (IOError, OSError):
return None
try:
pkg_info = email.message_from_file(pkg_info_file)
    except email.errors.MessageError:
return None
# Check to make sure we're in our own dir
if pkg_info.get('Name', None) != package_name:
return None
return pkg_info.get('Version', None)
def get_version(package_name, pre_version=None):
"""Get the version of the project. First, try getting it from PKG-INFO, if
it exists. If it does, that means we're in a distribution tarball or that
install has happened. Otherwise, if there is no PKG-INFO file, pull the
version from git.
We do not support setup.py version sanity in git archive tarballs, nor do
we support packagers directly sucking our git repo into theirs. We expect
that a source tarball be made from our git repo - or that if someone wants
to make a source tarball from a fork of our repo with additional tags in it
that they understand and desire the results of doing that.
"""
version = os.environ.get("OSLO_PACKAGE_VERSION", None)
if version:
return version
version = _get_version_from_pkg_info(package_name)
if version:
return version
version = _get_version_from_git(pre_version)
if version:
return version
raise Exception("Versioning for this project requires either an sdist"
" tarball, or access to an upstream git repository.")
|
|
'''
Taken from salt: salt/utils/jinja.py
https://github.com/saltstack/salt/blob/develop/salt/utils/jinja.py
Copyright 2014 SaltStack Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
N3X15 4/22/2015
- Modified to not require salt to use. This is too nifty to require dragging salt around.
- Bumped some infos up to warnings.
- Added salty_jinja_envs() so I don't have to stick this giant header into config.py
'''
# Import python libs
from __future__ import absolute_import
import json
import pprint
import logging
from functools import wraps
# Import third party libs
import six
from jinja2 import Markup, nodes
from jinja2.environment import TemplateModule
from jinja2.ext import Extension
from jinja2.exceptions import TemplateRuntimeError
import jinja2
import yaml
from collections import OrderedDict
log = logging.getLogger(__name__)
__all__ = [
'SerializerExtension',
'salty_jinja_envs'
]
def salty_jinja_envs(trim_blocks=False,lstrip_blocks=False):
env_args = {'extensions': []}
if hasattr(jinja2.ext, 'with_'):
env_args['extensions'].append('jinja2.ext.with_')
if hasattr(jinja2.ext, 'do'):
env_args['extensions'].append('jinja2.ext.do')
if hasattr(jinja2.ext, 'loopcontrols'):
env_args['extensions'].append('jinja2.ext.loopcontrols')
env_args['extensions'].append(SerializerExtension)
# Pass through trim_blocks and lstrip_blocks Jinja parameters
# trim_blocks removes newlines around Jinja blocks
# lstrip_blocks strips tabs and spaces from the beginning of
# line to the start of a block.
if trim_blocks:
log.debug('Jinja2 trim_blocks is enabled')
env_args['trim_blocks'] = True
if lstrip_blocks:
log.debug('Jinja2 lstrip_blocks is enabled')
env_args['lstrip_blocks'] = True
return env_args
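# A minimal usage sketch (an assumption, not part of the original module):
# build a Jinja2 environment from the arguments returned above and render a
# template using the 'json' filter registered by SerializerExtension. The
# template string and data below are purely illustrative.
def _example_salty_environment():
    env_args = salty_jinja_envs(trim_blocks=True, lstrip_blocks=True)
    env = jinja2.Environment(**env_args)
    template = env.from_string('json = {{ data|json }}')
    return template.render(data={'foo': True, 'bar': 42})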
# To dump OrderedDict objects as regular dicts. Used by the yaml
# template filter.
class OrderedDictDumper(yaml.Dumper): # pylint: disable=W0232
pass
yaml.add_representer(OrderedDict,
yaml.representer.SafeRepresenter.represent_dict,
Dumper=OrderedDictDumper)
class PrintableDict(OrderedDict):
'''
Ensures that dict str() and repr() are YAML friendly.
.. code-block:: python
mapping = OrderedDict([('a', 'b'), ('c', None)])
print mapping
# OrderedDict([('a', 'b'), ('c', None)])
decorated = PrintableDict(mapping)
print decorated
# {'a': 'b', 'c': None}
'''
def __str__(self):
output = []
for key, value in six.iteritems(self):
if isinstance(value, six.string_types):
# keeps quotes around strings
output.append('{0!r}: {1!r}'.format(key, value))
else:
# let default output
output.append('{0!r}: {1!s}'.format(key, value))
return '{' + ', '.join(output) + '}'
def __repr__(self): # pylint: disable=W0221
output = []
for key, value in six.iteritems(self):
output.append('{0!r}: {1!r}'.format(key, value))
return '{' + ', '.join(output) + '}'
def ensure_sequence_filter(data):
'''
Ensure sequenced data.
**sequence**
ensure that parsed data is a sequence
.. code-block:: yaml
{% set my_string = "foo" %}
{% set my_list = ["bar", ] %}
{% set my_dict = {"baz": "qux"} %}
{{ my_string|sequence|first }}
{{ my_list|sequence|first }}
{{ my_dict|sequence|first }}
will be rendered as:
.. code-block:: yaml
foo
bar
baz
'''
if not isinstance(data, (list, tuple, set, dict)):
return [data]
return data
@jinja2.contextfunction
def show_full_context(ctx):
return ctx
# REMOVED SaltCacheLoader.
class SerializerExtension(Extension, object):
'''
Yaml and Json manipulation.
**Format filters**
    Allows jsonifying or yamlifying any data structure. For example, this dataset:
.. code-block:: python
data = {
'foo': True,
'bar': 42,
'baz': [1, 2, 3],
'qux': 2.0
}
.. code-block:: jinja
yaml = {{ data|yaml }}
json = {{ data|json }}
python = {{ data|python }}
will be rendered as::
yaml = {bar: 42, baz: [1, 2, 3], foo: true, qux: 2.0}
json = {"baz": [1, 2, 3], "foo": true, "bar": 42, "qux": 2.0}
python = {'bar': 42, 'baz': [1, 2, 3], 'foo': True, 'qux': 2.0}
The yaml filter takes an optional flow_style parameter to control the
default-flow-style parameter of the YAML dumper.
.. code-block:: jinja
{{ data|yaml(False) }}
will be rendered as:
.. code-block:: yaml
bar: 42
baz:
- 1
- 2
- 3
foo: true
qux: 2.0
**Load filters**
Strings and variables can be deserialized with **load_yaml** and
**load_json** tags and filters. It allows one to manipulate data directly
in templates, easily:
.. code-block:: jinja
{%- set yaml_src = "{foo: it works}"|load_yaml %}
{%- set json_src = "{'bar': 'for real'}"|load_json %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
    will be rendered as::
Dude, it works for real!
**Load tags**
Salt implements **import_yaml** and **import_json** tags. They work like
the `import tag`_, except that the document is also deserialized.
Syntaxes are {% load_yaml as [VARIABLE] %}[YOUR DATA]{% endload %}
and {% load_json as [VARIABLE] %}[YOUR DATA]{% endload %}
For example:
.. code-block:: jinja
{% load_yaml as yaml_src %}
foo: it works
{% endload %}
{% load_json as json_src %}
{
"bar": "for real"
}
{% endload %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
    will be rendered as::
Dude, it works for real!
**Import tags**
External files can be imported and made available as a Jinja variable.
.. code-block:: jinja
{% import_yaml "myfile.yml" as myfile %}
{% import_json "defaults.json" as defaults %}
{% import_text "completeworksofshakespeare.txt" as poems %}
**Catalog**
    ``import_*`` and ``load_*`` tags will automatically expose their
    target variable to import. This makes it easy to build a catalog
    of data to handle.
for example:
.. code-block:: jinja
# doc1.sls
{% load_yaml as var1 %}
foo: it works
{% endload %}
{% load_yaml as var2 %}
bar: for real
{% endload %}
.. code-block:: jinja
# doc2.sls
{% from "doc1.sls" import var1, var2 as local2 %}
{{ var1.foo }} {{ local2.bar }}
.. _`import tag`: http://jinja.pocoo.org/docs/templates/#import
'''
tags = set(['load_yaml', 'load_json', 'import_yaml', 'import_json',
'load_text', 'import_text'])
def __init__(self, environment):
super(SerializerExtension, self).__init__(environment)
self.environment.filters.update({
'yaml': self.format_yaml,
'json': self.format_json,
'python': self.format_python,
'load_yaml': self.load_yaml,
'load_json': self.load_json,
'load_text': self.load_text,
})
if self.environment.finalize is None:
self.environment.finalize = self.finalizer
else:
finalizer = self.environment.finalize
@wraps(finalizer)
def wrapper(self, data):
return finalizer(self.finalizer(data))
self.environment.finalize = wrapper
def finalizer(self, data):
'''
Ensure that printed mappings are YAML friendly.
'''
def explore(data):
if isinstance(data, (dict, OrderedDict)):
return PrintableDict(
[(key, explore(value)) for key, value in six.iteritems(data)]
)
elif isinstance(data, (list, tuple, set)):
return data.__class__([explore(value) for value in data])
return data
return explore(data)
def format_json(self, value, sort_keys=True, indent=None):
return Markup(json.dumps(value, sort_keys=sort_keys, indent=indent).strip())
def format_yaml(self, value, flow_style=True):
yaml_txt = yaml.dump(value, default_flow_style=flow_style,
Dumper=OrderedDictDumper).strip()
if yaml_txt.endswith('\n...\n'):
# Changed to warning. - N3X
log.warn('Yaml filter ended with "\n...\n". This trailing string '
'will be removed in Boron.')
return Markup(yaml_txt)
def format_python(self, value):
return Markup(pprint.pformat(value).strip())
def load_yaml(self, value):
if isinstance(value, TemplateModule):
value = str(value)
try:
return yaml.safe_load(value)
except AttributeError:
raise TemplateRuntimeError(
'Unable to load yaml from {0}'.format(value))
def load_json(self, value):
if isinstance(value, TemplateModule):
value = str(value)
try:
return json.loads(value)
except (ValueError, TypeError, AttributeError):
raise TemplateRuntimeError(
'Unable to load json from {0}'.format(value))
def load_text(self, value):
if isinstance(value, TemplateModule):
value = str(value)
return value
_load_parsers = set(['load_yaml', 'load_json', 'load_text'])
def parse(self, parser):
if parser.stream.current.value == 'import_yaml':
return self.parse_yaml(parser)
elif parser.stream.current.value == 'import_json':
return self.parse_json(parser)
elif parser.stream.current.value == 'import_text':
return self.parse_text(parser)
elif parser.stream.current.value in self._load_parsers:
return self.parse_load(parser)
parser.fail('Unknown format ' + parser.stream.current.value,
parser.stream.current.lineno)
# pylint: disable=E1120,E1121
def parse_load(self, parser):
filter_name = parser.stream.current.value
lineno = next(parser.stream).lineno
if filter_name not in self.environment.filters:
parser.fail('Unable to parse {0}'.format(filter_name), lineno)
parser.stream.expect('name:as')
target = parser.parse_assign_target()
macro_name = '_' + parser.free_identifier().name
macro_body = parser.parse_statements(
('name:endload',), drop_needle=True)
return [
nodes.Macro(
macro_name,
[],
[],
macro_body
).set_lineno(lineno),
nodes.Assign(
target,
nodes.Filter(
nodes.Call(
nodes.Name(macro_name, 'load').set_lineno(lineno),
[],
[],
None,
None
).set_lineno(lineno),
filter_name,
[],
[],
None,
None
).set_lineno(lineno)
).set_lineno(lineno)
]
def parse_yaml(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_yaml',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
def parse_json(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_json',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
def parse_text(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_text',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
# pylint: enable=E1120,E1121
|
|
"""
Analysing Scales of Precipitation diagnostics
Space-time Coherence package
These functions allow the user to compute and plot diagnostics of the
spatial and temporal coherence of precipitation in observations and/or
models. These functions should work for any resolution or timescale.
These functions assume that the input precipitation dataset is in netCDF.
Written by Nicholas Klingaman
nicholas.klingaman@ncas.ac.uk
(C) The author 2017
"""
import csv
import os
import json
import numpy as np
import iris
import cf, cfplot as cfp
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from six.moves import range
def update_output_json(json_key, data_description, wk_dir):
"""Add the dictionary 'data_description' under the key 'json_key' in
the descriptive output json"""
json_filename = os.path.join(wk_dir,"output.json")
# Only update output json if it exists
if os.path.exists(json_filename):
with open(json_filename,"r") as output_json:
output=json.load(output_json)
output[json_key].update(data_description)
with open(json_filename,"w") as output_json:
json.dump(output,output_json, indent=2)
def default_options():
"""
Dummy function to return initial values of key parameters that must be defined.
"""
region_size=0
input_box_size=0
lag_length=0
autocorr_length=0
time_type='None'
grid_type='None'
time_desc='None'
grid_desc='None'
model_desc='None'
return(region_size,input_box_size,lag_length,autocorr_length,time_type,grid_type,time_desc,grid_desc,model_desc)
def parameters():
"""
Parameters that control the size of arrays used in other functions.
"""
max_box_distance=100
max_timesteps=100
max_boxes=100
return(max_box_distance,max_boxes,max_timesteps)
def read_precip(model_dict):
"""
Use iris to read precipitation data into a Cube.
The area to read is controlled by the "region" element of the dataset dictionary (model_dict)
"region" should be defined as min_lat, max_lat, min_lon, max_lon.
Arguments:
* model_dict: The dictionary containing details of the dataset to read.
(See the create_dict function).
Returns:
* precip: An iris cube of precipitation data.
"""
constraint = iris.Constraint(model_dict['constraint'],
latitude=lambda cell: model_dict['region'][0] <= cell <= model_dict['region'][1],
longitude=lambda cell: model_dict['region'][2] <= cell <= model_dict['region'][3])
precip = iris.load_cube(model_dict['infile'],constraint)*model_dict['scale_factor']
    try:
        precip.coord('latitude').guess_bounds()
    except ValueError:
        # Bounds already exist (or cannot be guessed); leave the coordinate as-is.
        pass
    try:
        precip.coord('longitude').guess_bounds()
    except ValueError:
        pass
return(precip)
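# A minimal sketch of the dataset dictionary consumed above (key names taken
# from this function; the values are illustrative assumptions only - the full
# set of keys is documented by the create_dict function mentioned in the
# docstring):
#   model_dict = {
#       'infile': 'precip_3hr_n96.nc',        # path to the netCDF file
#       'constraint': 'precipitation_flux',   # iris load constraint name
#       'scale_factor': 86400.0,              # e.g. kg m-2 s-1 -> mm day-1
#       'region': (-10, 10, 60, 90),          # min_lat, max_lat, min_lon, max_lon
#   }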
def compute_histogram(precip,bins):
"""
Computes 1D and 2D histograms of precipitation from an input iris cube of precipitation.
The 2D histogram is the histogram of precipitation on consecutive timesteps.
Arguments:
* precip :
An iris cube of precipitation
* bins:
A numpy array of the edges of the bins for which to compute the histogram,
in the same units as "precip" above.
Returns:
* oned_hist:
The one-dimensional histogram, as a numpy array, normalised so that the sum of the values is one.
* twod_hist:
The two-dimensional histogram, as a numpy array, normalised so that the sum of the values is one.
"""
oned_hist, bin_edges = np.histogram(precip.data,bins)
nbins=len(oned_hist)
twod_hist=np.zeros((nbins,nbins))
print('---> Computing 2D histogram')
for t, t_slice in enumerate(precip.slices(['time'])):
next_slice = t_slice.copy()
next_slice.data = np.roll(t_slice.data,1,0)
twod_hist_temp,xedges,yedges = np.histogram2d(t_slice.data,next_slice.data,bins)
twod_hist=twod_hist+twod_hist_temp
twod_hist=twod_hist/np.sum(twod_hist)
oned_hist=oned_hist/np.float(np.sum(oned_hist))
return(oned_hist,twod_hist)
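# Illustrative usage (the bin edges below are an assumption, in the same
# units as the precipitation cube):
#   bins = np.array([0., 1., 2., 4., 8., 16., 32., 64., 128., 256.])
#   oned_hist, twod_hist = compute_histogram(precip, bins)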
def plot_histogram(oned_hist,twod_hist,model_dict,bins,title=True,colorbar=True,wk_dir='.',ext='.ps'):
"""
Creates a PostScript plot of the 1D and 2D histograms calculated in compute_histogram.
Arguments:
* oned_hist:
The 1D histogram computed in compute_histogram.
* twod_hist:
The 2D histogram computed in compute_histogram.
* model_dict:
        The dictionary containing details of this model.
* bins:
The edges of the histogram bins, as a numpy array.
Optional arguments:
* title:
A logical to control whether the title is printed at the top of the plot (useful for creating multi-panel plots).
* colorbar:
A logical to control whether the colorbar is printed at the bottom of the plot (useful for creating multi-panel plots).
"""
print('---> Plotting 2D histogram')
nbins = np.size(oned_hist)
hist_con_levs=[1e-5,2e-5,4e-5,7e-5,1e-4,2e-4,4e-4,7e-4,1e-3,2e-3,4e-3,7e-3,1e-2,2e-2,4e-2,7e-2,1e-1]
fig = plt.figure()
ax = fig.add_subplot(111)
cmap = plt.cm.get_cmap("viridis_r")
norm = BoundaryNorm(hist_con_levs,ncolors=cmap.N,clip=True)
contour = ax.pcolormesh(np.arange(nbins+1),np.arange(nbins+1),twod_hist,cmap=cmap,norm=norm)
if colorbar == True:
cbar = fig.colorbar(contour,orientation='horizontal',ticks=hist_con_levs)
cbar.ax.set_xlabel('Probability',fontsize=18)
cbar.ax.set_xticklabels(['1e-5','2e-5','4e-5','7e-5','1e-4','2e-4','4e-4','7e-4','1e-3','2e-3','4e-3','7e-3','1e-2','2e-2','4e-2','7e-2','1e-1'])
ax.set_xlabel('Precipitation at time t (mm day$^{-1}$)',fontsize=16)
ax.set_ylabel('Precipitation at time t+1 (mm day$^{-1}$)',fontsize=16)
ticklabels=['< '+str(bins[1])]
for bin in range(1,nbins):
ticklabels.append(str(bins[bin]))
ticklabels.append(' > '+str(bins[nbins-1]))
ax.set_xticks(np.arange(nbins+1))
ax.set_xticklabels(ticklabels,fontsize=14)
ax.set_yticks(np.arange(nbins+1))
ax.set_yticklabels(ticklabels,fontsize=14)
if title == True:
title_string = '2D histogram for '+model_dict['legend_name']
if 'time_desc' in model_dict:
title_string = title_string+' '+model_dict['time_desc']
if 'grid_desc' in model_dict:
title_string = title_string+' '+model_dict['grid_desc']
if 'region_desc' in model_dict:
title_string = title_string+' - '+model_dict['region_desc']
title_string = title_string + ' data'
ax.set_title(title_string)
ax.axis([0,nbins,0,nbins])
ax.set_xlim(xmin=0,xmax=nbins)
ax2 = ax.twinx()
ax2.plot(np.arange(nbins)+0.5,oned_hist,'k--',marker='o',markersize=8)
ax2.set_yscale('log',nonposy='clip')
ax2.set_ylim(ymin=0.0009,ymax=1.0)
ax2.set_ylabel('Probability of precipitation in bin',fontsize=18)
ax2.set_yticks([1e-3,1.4e-3,2e-3,3e-3,4.5e-3,7e-3,1e-2,1.4e-2,2e-2,3e-2,4.5e-2,7e-2,1e-1,1.4e-1,2e-1,3e-1,4.5e-1,7e-1,1])
ax2.set_yticklabels(['1.0e-3','1.4e-3','2.0e-3','3.0e-3','4.5e-3','7.0e-3','1.0e-2','1.4e-2','2.0e-2','3.0e-2','4.5e-2',
'7.0e-2','1.0e-1','1.4e-1','2.0e-1','3.0e-1','4.5e-1','7.0e-1','1.0e0'],fontsize=14)
ax2.set_xlim(xmin=0,xmax=nbins)
plot_name=wk_dir+'/asop_coherence.'+model_dict['name']
if 'grid_type' in model_dict:
plot_name=plot_name+'_'+model_dict['grid_type']
if 'time_type' in model_dict:
plot_name=plot_name+'_'+model_dict['time_type']
if 'region_name' in model_dict:
plot_name=plot_name+'_'+model_dict['region_name'].replace(" ","_")
plot_name=plot_name+'_precip_twodpdf'+ext
plt.savefig(plot_name,bbox_inches='tight')
desc = {os.path.relpath(plot_name,wk_dir): {
"longname": os.path.basename(plot_name).split(".")[1].replace("_"," "),
"description": "1-d and 2-d histogram plots"
}
}
update_output_json("plots",desc,wk_dir)
def haversine(origin, destination):
"""
Compute distance between two gridpoints in km.
Method: Haversine function
Arguments:
* origin: Tuple of (latitude,longitude) for origin point
* destination: Tuple of (latitude, longitude) for destination point
Returns:
* d: distance between origin and destination in km
"""
import math
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
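# Sanity check for haversine (comment-only sketch): one degree of longitude at the
# equator spans roughly 6371 km * pi/180, i.e. about 111.2 km, so
# haversine((0.0, 0.0), (0.0, 1.0)) should return approximately 111.2.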
def compute_equalgrid_corr(precip,model_dict,metrics_csv=None,wk_dir='.'):
"""
Compute correlations in space and time, using the native spatial and temporal
resolutions of the input data.
Method:
The input spatial domain is broken down into non-overlapping regions of N x N gridpoints, where
N is controlled by the "region_size" input parameter.
Correlations in both space and time are computed with respect to lag-0 at the central
point in each region.
Composite (average) correlations are computed over all regions in the domain.
To express the average correlations as a function of distance from the central point, bins of
distance from the central point are created, with width delta_x (the x spacing of the input data,
taken from the dataset dictionary), starting from 0.5*delta_x. Correlations are averaged within each bin,
at each lag.
Arguments:
* precip:
An iris cube of precipitation data
* model_dict:
The dictionary containing information about this dataset.
Returns:
* corr_map (lag_length,region_size,region_size):
A 'map' of the composite correlations over all regions in the domain, at each lag.
* lag_vs_distance (lag_length,region_size):
The composite correlation over all regions in the domain, expressed as a function of time (lag)
and distance from the central point, in bins of delta_x (x spacing of the input data) starting
from 0.5*delta_x.
* autocorr (lag_length):
The composite auto-correlation of precipitation at the central point, averaged over all regions
in the domain.
* npts_map (lag_length,region_size,region_size):
The number of gridpoints that contributed to the composite map of correlations. Can be used
to weight the correlation map if averaging over multiple maps.
* npts (lag_length,region_size):
The number of gridpoints in each lag and distance bin of the lag_vs_distance array. Used by
the corresponding plotting function to determine whether there are any points in this bin.
"""
lag_length = model_dict['lag_length']
region_size = model_dict['region_size']
print('---> Computing correlations for '+str(region_size)+'x'+str(region_size)+' sub-regions')
latitude = precip.coord('latitude').points
longitude = precip.coord('longitude').points
nlon=len(longitude)
nlat=len(latitude)
print('----> Info: Size of domain in native gridpoints: '+str(nlon)+' longitude x '+str(nlat)+' latitude.')
nregions=0
npts=np.zeros((lag_length,region_size),dtype=np.int32)
npts_map=np.zeros((lag_length,region_size,region_size),dtype=np.int32)
# distance_x=np.zeros(region_size)
# distance_y=np.zeros(region_size)
corr_map=np.zeros((lag_length,region_size,region_size))
lag_vs_distance=np.zeros((lag_length,region_size))
autocorr=np.zeros((lag_length))
for region_xstart in range(0,nlon-region_size+1,region_size):
region_xcentre = region_xstart+region_size//2
for region_ystart in range(0,nlat-region_size+1,region_size):
region_ycentre = region_ystart+region_size//2
central_precip = precip[:,region_ycentre,region_xcentre]
# for region_x in range(region_size):
# distance_x[region_x]=np.abs(region_xstart+region_x-region_xcentre)
# for region_y in range(region_size):
# distance_y[region_y]=np.abs(region_ystart+region_y-region_ycentre)*model_dict['dy']/float(model_dict['dx'])
for region_x in range(region_size):
for region_y in range(region_size):
# distance=np.int(round(np.sqrt(distance_x[region_x]*distance_x[region_x]+distance_y[region_y]*distance_y[region_y])))-1
km_distance = haversine((latitude[region_ycentre],longitude[region_xcentre]),(latitude[region_ystart+region_y],longitude[region_xstart+region_x]))
distance = int(round(km_distance/model_dict['dx']))-1
if distance < region_size:
remote_precip=precip[:,region_y+region_ystart,region_x+region_xstart]
corr = np.corrcoef([central_precip.data,remote_precip.data])[1,0]
if not np.isnan(corr):
corr_map[0,region_y,region_x]=corr+corr_map[0,region_y,region_x]
npts_map[0,region_y,region_x]=npts_map[0,region_y,region_x]+1
if (region_x + region_xstart == region_xcentre) and (region_y + region_ystart == region_ycentre):
autocorr[0]=autocorr[0]+1
for lag in range(1,lag_length):
corr = np.corrcoef(central_precip.data,np.roll(central_precip.data,lag,0))[1,0]
if not np.isnan(corr):
corr_map[lag,region_y,region_x]=corr+corr_map[lag,region_y,region_x]
npts_map[lag,region_y,region_x]=npts_map[lag,region_y,region_x]+1
autocorr[lag]=corr+autocorr[lag]
npts[lag,0]=npts[lag,0]+1
else:
if not np.isnan(corr):
lag_vs_distance[0,distance]=lag_vs_distance[0,distance]+corr
npts[0,distance]=npts[0,distance]+1
for lag in range(1,lag_length):
corr = np.corrcoef([central_precip.data,np.roll(remote_precip.data,lag,0)])[1,0]
if not np.isnan(corr):
corr_map[lag,region_y,region_x] = corr+corr_map[lag,region_y,region_x]
npts_map[lag,region_y,region_x] = npts_map[lag,region_y,region_x]+1
lag_vs_distance[lag,distance] = corr+lag_vs_distance[lag,distance]
npts[lag,distance]=npts[lag,distance]+1
nregions = nregions+1
corr_map = corr_map/npts_map
print('----> Info: There are '+str(nregions)+' '+str(region_size)+'x'+str(region_size)+' sub-regions in your input data.')
for lag in range(lag_length):
for dist in range(region_size):
if npts[lag,dist] > 0:
lag_vs_distance[lag,dist]=lag_vs_distance[lag,dist]/npts[lag,dist]
# If there are no gridpoints in range, set correlation to a missing value
if npts[lag,dist] == 0:
lag_vs_distance[lag,dist]=-999
autocorr[lag]=autocorr[lag]/nregions
# Write metrics to file for CMEC driver
if metrics_csv is not None:
box_length_x = int(model_dict['box_size']//model_dict['dx'])
box_length_y = int(model_dict['box_size']//model_dict['dy'])
data_to_csv = {"Dataset": model_dict["name"],
"dx": model_dict["dx"],
"dy": model_dict["dy"],
"region_size": region_size,
"nlon": nlon,
"nlat": nlat,
"nregions": nregions,
"gridbox_x": model_dict["box_size"],
"gridbox_y": model_dict["box_size"],
"nx": box_length_x,
"ny": box_length_y}
field_names = ["Dataset",
"dx",
"dy",
"region_size",
"nlon",
"nlat",
"nregions",
"gridbox_x",
"gridbox_y",
"nx",
"ny"]
if not os.path.exists(metrics_csv):
with open(metrics_csv,"w") as fname:
csv_writer = csv.DictWriter(fname, fieldnames=field_names)
csv_writer.writeheader()
csv_writer.writerow(data_to_csv)
else:
with open(metrics_csv,"a") as fname:
csv_writer = csv.DictWriter(fname, fieldnames=field_names)
csv_writer.writerow(data_to_csv)
data_desc = {
os.path.basename(metrics_csv): {
"longname": os.path.basename(metrics_csv).split(".")[1].replace("_"," "),
"description": "Box dimension information organized by model"
}
}
update_output_json("metrics",data_desc,wk_dir)
return(corr_map,lag_vs_distance,autocorr,npts_map,npts)
def plot_equalgrid_corr(corr_map,lag_vs_distance,autocorr,npts,model_dict,title=True,colorbar=True,wk_dir='.',ext='.ps'):
"""
Plots correlations as functions of space and time, which were first computed
using compute_equalgrid_corr.
Two types of plots are created:
1. For each lag (from 0 to lag_length defined in model_dict), a 2D map
of the composite correlations against the central point (at lag 0) for all
points in the region (of length region_size).
2. A single lag vs. distance plot showing the composite correlations against
the central point (at lag 0), averaged over all points in each region in each
distance bin (in steps of dx starting at 0.5dx), as well as the auto-correlation
at the central point.
See Fig. 2 in Klingaman et al. (2017, GMD, doi:10.5194/gmd-10-57-2017) for examples
of these diagrams.
Arguments:
* corr_map (lag_length,region_size,region_size):
Composite maps of correlations at each lag, returned from compute_equalgrid_corr
* lag_vs_distance (lag_length,region_size):
Composite correlations over all regions in the domain, expressed as a function of
time (lag) and distance from the central point, returned from compute_equalgrid_corr
* autocorr (lag_length):
The composite auto-correlation of precipitation at the central point, averaged over
all regions in the domain.
* npts (lag_length,region_size):
The number of gridpoints in each distance bin of the lag_vs_distance array. Used
to determine whether there are any points in each distance bin.
Optional arguments:
* title:
Include a title on the plot
* colorbar:
Include a colorbar on the plot
Returns:
None
"""
region_size = model_dict['region_size']
lag_length = model_dict['lag_length']
print('---> Plotting correlation maps for '+str(region_size)+'x'+str(region_size)+' sub-regions')
corr_con_levs=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95]
# Plot correlation maps at each lag
for lag in range(lag_length):
plot_name=wk_dir+'/asop_coherence.'+model_dict['name']
if 'grid_type' in model_dict:
plot_name=plot_name+'_'+model_dict['grid_type']
if 'time_type' in model_dict:
plot_name=plot_name+'_'+model_dict['time_type']
if 'region_name' in model_dict:
plot_name=plot_name+'_'+model_dict['region_name'].replace(" ","_")
plot_name=plot_name+'_precip_'+str(region_size)+'x'+str(region_size)+'maps_lag'+str(lag)+ext
cfp.setvars(file=plot_name,text_fontsize=18,axis_label_fontsize=18,colorbar_fontsize=18)
cfp.gopen(figsize=[8,8])
cfp.gset(xmin=0,xmax=region_size,ymin=0,ymax=region_size)
cfp.levs(manual=np.array(corr_con_levs))
cfp.cscale(cmap='parula',reverse=1,ncols=len(corr_con_levs)+1,white=0)
#cfp.axes(xticks=np.arange(region_size)+0.5,yticks=np.arange(region_size)+0.5,
# xticklabels=np.arange(region_size)-region_size//2,yticklabels=np.arange(region_size)-region_size//2)
if title == True:
title_string = 'Correlation map for '+model_dict['legend_name']
if 'time_desc' in model_dict:
title_string = title_string + ' - ' +model_dict['time_desc']
if 'grid_desc' in model_dict:
title_string = title_string + ' - ' +model_dict['grid_desc']
if 'region_desc' in model_dict:
title_string = title_string+' - '+model_dict['region_desc']
title_string = title_string + ' - Lag '+str(lag)
else:
title_string = ''
if colorbar == True:
cfp.con(f=corr_map[lag,:,:],x=np.arange(region_size)+0.5,y=np.arange(region_size)+0.5,blockfill=1,
lines=False,line_labels=False,ptype=0,colorbar=1,colorbar_title='Correlation with (0,0) at lag 0, mean over all sub-regions',
title=title_string, colorbar_text_up_down=True,
xlabel=r'$\Delta$x (approximately '+str(round(model_dict['dx']))+' km)',
ylabel=r'$\Delta$y (approximately '+str(round(model_dict['dy']))+' km)',
xticks=np.arange(region_size)+0.5,yticks=np.arange(region_size)+0.5,
xticklabels=np.arange(region_size)-region_size//2,yticklabels=np.arange(region_size)-region_size//2)
else:
cfp.con(f=corr_map[lag,:,:],x=np.arange(region_size)+0.5,y=np.arange(region_size)+0.5,blockfill=1,
lines=False,line_labels=False,ptype=0,colorbar=0,title=title_string,
xlabel=r'$\Delta$x (approximately '+str(model_dict['dx'])+' km)',
ylabel=r'$\Delta$y (approximately '+str(model_dict['dy'])+' km)',
xticks=np.arange(region_size)+0.5,yticks=np.arange(region_size)+0.5,
xticklabels=np.arange(region_size)-region_size//2,yticklabels=np.arange(region_size)-region_size//2)
for region_x in range(region_size):
for region_y in range(region_size):
if corr_map[lag,region_y,region_x] > 0.5:
cfp.plotvars.plot.text(region_x+0.5,region_y+0.5,str(corr_map[lag,region_y,region_x])[0:4],
horizontalalignment='center',color='white',fontsize=20)
elif corr_map[lag,region_y,region_x] < 0.0:
cfp.plotvars.plot.text(region_x+0.5,region_y+0.5,str(corr_map[lag,region_y,region_x])[0:5],
horizontalalignment='center',color='black',fontsize=20)
else:
cfp.plotvars.plot.text(region_x+0.5,region_y+0.5,str(corr_map[lag,region_y,region_x])[0:4],
horizontalalignment='center',color='black',fontsize=20)
cfp.gclose()
desc = {os.path.relpath(plot_name,wk_dir): {
"longname": os.path.basename(plot_name).split(".")[1].replace("_"," "),
"description": "2D map of the composite correlations against the central point (at lag 0)"+
" for all points in the region for lag {0}".format(str(lag))
}
}
update_output_json("plots",desc,wk_dir)
# Plot correlation vs. distance diagram
print('---> Plotting lag vs. distance diagram')
plot_name=wk_dir+'/asop_coherence.'+model_dict['name']
if 'grid_type' in model_dict:
plot_name=plot_name+'_'+model_dict['grid_type']
if 'time_type' in model_dict:
plot_name=plot_name+'_'+model_dict['time_type']
if 'region_name' in model_dict:
plot_name=plot_name+'_'+model_dict['region_name'].replace(" ","_")
plot_name=plot_name+'_precip_'+str(region_size)+'x'+str(region_size)+'_lag'+str(lag_length)+ext
cfp.setvars(file=plot_name,text_fontsize=20,axis_label_fontsize=18)
cfp.gopen(figsize=[9,8])
ticklabels=['Centre','0.5']
max_dist=0
for dist in range(2,region_size):
if npts[0,dist] > 0 :
ticklabels.append(str(dist-0.5))
max_dist=dist
ticklabels.append(str(max_dist+0.5))
ticklabels.append(str(max_dist+1.5))
lag_vs_distance=np.insert(lag_vs_distance,0,autocorr,1)
cfp.gset(xmin=0,xmax=max_dist+1,ymin=0,ymax=lag_length)
cfp.levs(manual=np.array(corr_con_levs))
cfp.cscale(cmap='parula',reverse=1,ncols=len(corr_con_levs)+1,white=0)
xtickvals=np.arange(max_dist+1)+2.0
xtickvals=np.insert(xtickvals,0,[0.5,1.0])
cfp.axes(xticks=xtickvals,yticks=np.arange(lag_length)+0.5,xticklabels=ticklabels,yticklabels=np.arange(lag_length),
xlabel=r'$\Delta$x bins ($\Delta$x approximately '+str(round(model_dict['dx']))+' km at the equator)',
ylabel='Lag')
if title == True:
title_string = 'Correlation map for '+model_dict['legend_name']
if 'time_desc' in model_dict:
title_string = title_string + ' - ' +model_dict['time_desc']
if 'grid_desc' in model_dict:
title_string = title_string + ' - ' +model_dict['grid_desc']
if 'region_name' in model_dict:
title_string = title_string+' - '+model_dict['region_name']
else:
title_string = ''
if colorbar == True:
cfp.con(f=lag_vs_distance[:,0:max_dist+2],x=np.arange(max_dist+2)+0.5,y=np.arange(lag_length)+0.5,blockfill=1,
lines=False,line_labels=False,ptype=0,colorbar_title='Correlation with centre at lag=0, mean over all sub-regions',
title=title_string)
else:
cfp.con(f=lag_vs_distance[:,0:max_dist+2],x=np.arange(max_dist+2)+0.5,y=np.arange(lag_length)+0.5,blockfill=1,
lines=False,line_labels=False,ptype=0,colorbar=0,title=title_string)
for dist in range(max_dist+2):
for lag in range(lag_length):
if lag_vs_distance[lag,dist] == -999:
print('-999')
# cfp.plotvars.plot.text(dist+0.5,lag+0.5,'XXX',horizontalalignment='center',color='black',fontsize=20,verticalalignment='center')
elif lag_vs_distance[lag,dist] > 0.5:
cfp.plotvars.plot.text(dist+0.5,lag+0.5,str(lag_vs_distance[lag,dist])[0:4],
horizontalalignment='center',color='white',fontsize=20)
elif lag_vs_distance[lag,dist] < 0.0:
cfp.plotvars.plot.text(dist+0.5,lag+0.5,str(lag_vs_distance[lag,dist])[0:5],
horizontalalignment='center',color='black',fontsize=20)
else:
cfp.plotvars.plot.text(dist+0.5,lag+0.5,str(lag_vs_distance[lag,dist])[0:4],
horizontalalignment='center',color='black',fontsize=20)
cfp.gclose()
desc = {os.path.relpath(plot_name,wk_dir): {
"longname": os.path.basename(plot_name).split(".")[1].replace("_"," "),
"description": "Correlation vs distance diagram for lag "+str(lag)
}
}
update_output_json("plots",desc,wk_dir)
def compute_equalarea_corr(precip,model_dict):
"""
Computes spatial correlations as a function of physical distance (in km). Note that unlike
compute_equalgrid_corr, this routine does *not* compute lagged correlations; it computes only
instantaneous correlations.
Method:
As in compute_equalgrid_corr, the analysis domain is divided into square sub-regions that are
box_size in length. Precipitation at each point in the sub-region is correlated against the
central point in the domain. These correlations are binned by the physical distance from the
central point, using bins delta_x wide starting from 0.5*delta_x. Correlations are averaged
within the bin and across all sub-regions.
As of vn1.0.2, distances are computed by a Haversine function rather than by using the user-
specified dx and dy keys in model_dict.
Limitations:
* The number of distance bins is limited to max_box_distance, defined in the "parameters" function above.
Arguments:
* precip
An iris cube of precipitation to analyse
* model_dict
The dictionary for this dataset
Returns:
* distance_correlations (max_box_distance):
Correlations as a function of physical distance, binned by physical distance from the
central point, using bins delta_x wide.
* distance_ranges (3, max_box_distance):
The minimum, median and maximum distance away from the central points, considering
all points in that distance bin
* distance_max (max_box_distance):
The number of bins that contain valid points. Can be used to subscript
"distance_correlations" and "distance_ranges" (i.e., distance_correlations[0:distance_max]).
"""
print('---> Computing correlations for '+str(model_dict['box_size'])+'x'+str(model_dict['box_size'])+' km sub-boxes.')
longitude = precip.coord('longitude').points
nlon=len(precip.coord('longitude').points)
latitude = precip.coord('latitude').points
nlat=len(latitude)
max_box_distance,max_boxes,max_timesteps = parameters()
box_length_x = int(model_dict['box_size']//model_dict['dx'])
box_length_y = int(model_dict['box_size']//model_dict['dy'])
# distance_x = np.zeros(box_length_x)
# distance_y = np.zeros(box_length_y)
nboxes=0
autocorr=0
npts=np.zeros(max_box_distance,dtype=np.int32)
distance_lists = np.zeros((max_box_distance,max_box_distance*max_boxes))
distance_ranges = np.zeros((3,max_box_distance))
distance_correlations = np.zeros((max_box_distance))
print('----> Info: Sub-boxes are '+str(box_length_x)+'x'+str(box_length_y)+' gridboxes in this model (nx x ny).')
for box_xstart in range(0,nlon+1-box_length_x,box_length_x):
box_xcentre = box_xstart+box_length_x//2
for box_ystart in range(0,nlat+1-box_length_y,box_length_y):
box_ycentre = box_ystart+box_length_y//2
central_precip = precip[:,box_ycentre,box_xcentre]
# for box_x in range(box_length_x):
# distance_x[box_x]=np.abs(box_xstart+box_x-box_xcentre)*np.cos(latitude[box_ycentre]*np.pi/180.)
# for box_y in range(box_length_y):
# distance_y[box_y]=np.abs(box_ystart+box_y-box_ycentre)*model_dict['dy']/float(model_dict['dx'])
for box_x in range(box_length_x):
for box_y in range(box_length_y):
#distance=np.int(round(np.sqrt(distance_x[box_x]*distance_x[box_x]+distance_y[box_y]*distance_y[box_y])))
# km_distance=np.sqrt(distance_x[box_x]*distance_x[box_x]+distance_y[box_y]*distance_y[box_y])*model_dict['dx']
km_distance = haversine((latitude[box_ycentre],longitude[box_xcentre]),(latitude[box_ystart+box_y],longitude[box_xstart+box_x]))
if km_distance < model_dict['box_size']:
distance = int(round(km_distance/model_dict['dx']))
distance_lists[distance,npts[distance]]=km_distance
remote_precip=precip[:,box_y+box_ystart,box_x+box_xstart]
if (box_x + box_xstart == box_xcentre) and (box_y + box_ystart == box_ycentre):
autocorr=autocorr+1
corr=np.corrcoef([central_precip.data,remote_precip.data])[1,0]
if not np.isnan(corr):
distance_correlations[distance]=distance_correlations[distance]+corr
npts[distance]=npts[distance]+1
nboxes = nboxes+1
if nboxes >= max_boxes :
raise Exception('ERROR: Number of sub-boxes ('+str(nboxes)+') exceeds the maximum number of sub-boxes ('+str(max_boxes)+'). Increase size of sub-boxes (-b option) or increase parameter max_boxes in code.')
distance_max=0
for dist in range(max_box_distance):
if npts[dist] > 0 :
distance_correlations[dist]=distance_correlations[dist]/npts[dist]
distance_ranges[0,dist]=np.amin(distance_lists[dist,0:npts[dist]])
distance_ranges[1,dist]=np.median(distance_lists[dist,0:npts[dist]])
distance_ranges[2,dist]=np.amax(distance_lists[dist,0:npts[dist]])
distance_max=dist
else:
distance_correlations[dist]=-999
if distance_max==0:
print("ERROR: no points found within max_box_distance. Try expanding region size.")
print('---> Info: There are '+str(nboxes)+' sub-boxes in your input data.')
return(distance_correlations,distance_ranges,distance_max)
def compute_autocorr(precip,model_dict):
"""
Compute the lagged auto-correlation of precipitation
across all points in the analysis domain.
Arguments:
* precip:
An iris cube of precipitation to analyse
* model_dict:
The dictionary of information about this dataset
Returns:
* time_correlations (max_timesteps):
Composite lagged auto-correlations across all points
* time_max (max_timesteps):
The maximum valid lag in the time_correlation array (can be
used as a subscript).
"""
print('---> Computing auto-correlations')
nlon=len(precip.coord('longitude').points)
nlat=len(precip.coord('latitude').points)
autocorr_length = model_dict['autocorr_length']
max_box_distance,max_boxes,max_timesteps = parameters()
# +1 to account for lag-zero correlation
autocorr_nt = np.int64(autocorr_length//(model_dict['dt']))+1
time_max = autocorr_nt
time_correlations = np.zeros(max_timesteps)
print('----> Info: Computing auto-correlations for '+str(autocorr_nt)+' lags.')
if autocorr_nt > max_timesteps:
raise Exception('Error: Number of lags for auto-correlation exceeds maximum ('+str(max_timesteps)+'). Increase parameter max_timesteps in code or reduce autocorrelation length.')
for lon in range(nlon):
for lat in range(nlat):
for lag in range(autocorr_nt):
this_precip = precip.data[:,lat,lon]
time_correlations[lag] = time_correlations[lag] + np.corrcoef(this_precip,np.roll(this_precip,lag,0))[1,0]
time_correlations = time_correlations / (nlon*nlat)
return(time_correlations,time_max)
def compute_spacetime_summary(precip,ndivs,twod=False,cyclic_lon=False,metrics_csv=None,short_name=None,wk_dir='.'):
"""
Computes summary metrics of spatial and temporal coherence,
as in Klingaman et al. (2017).
Method:
Precipitation data are binned into "ndivs" divisions (at each gridpoint).
The temporal coherence metric measures the relative frequency of
persistent upper- and lower-division precipitation to the relative
frequency of intermittent precipitation (upper-division then
lower-division, or lower-division then upper-division) on consecutive
timesteps at the same gridpoint.
The spatial coherence metric measures the relative frequency of
persistent upper- and lower-quartile precipitation to the relative
frequency of intermittent precipitation, using data at neighboring
gridpoints.
In Klingaman et al. (2017), the divisions are quartiles, but this
can be adjusted with the "ndivs" argument (see below).
Positive values of either measure indicate coherent precipitation.
Negative values of either measure indicate intermittent precipitation.
The function prints the individual upper- and lower-quartile metrics,
as well as the combined metric (the ratio). These values are only
printed to the screen; they are not plotted.
Arguments:
* precip:
An iris cube of precipitation to analyse
* ndivs:
The number of divisions for the spatial and coherence metrics.
Example: ndivs=4 computes the metrics based on upper-quartile and
lower-quartile precipitation.
Returns:
* space_inter:
The combined spatial coherence metric.
* time_inter:
The combined temporal coherence metric.
"""
print('---> Computing summary statistics for spatial and temporal intermittency')
nlon=precip.shape[2]
nlat=precip.shape[1]
lower_thresh = np.empty((nlat,nlon))
upper_thresh = np.empty((nlat,nlon))
for lon in range(nlon):
for lat in range(nlat):
this_precip = precip.data[:,lat,lon]
this_precip = this_precip[np.where(this_precip > 1)]
nt = np.size(this_precip)
if nt > ndivs:
precip_sorted = np.sort(this_precip)
lower_thresh[lat,lon] = precip_sorted[np.int32(np.floor(nt/ndivs))]
upper_thresh[lat,lon] = precip_sorted[np.int32(np.floor(nt*(ndivs-1)/float(ndivs)))]
else:
lower_thresh[lat,lon] = 0
upper_thresh[lat,lon] = 0
onon_count=0
onoff_count=0
offon_count=0
offoff_count=0
non=0
noff=0
for lon in range(nlon):
for lat in range(nlat):
this_precip = precip.data[:,lat,lon]
nt = np.size(this_precip)
for t in range(nt-1):
if this_precip[t] > 1:
if (this_precip[t] > upper_thresh[lat,lon]):
non=non+1
if (this_precip[t+1] < lower_thresh[lat,lon]):
onoff_count=onoff_count+1
elif (this_precip[t+1] > upper_thresh[lat,lon]):
onon_count=onon_count+1
elif (this_precip[t] < lower_thresh[lat,lon]):
noff=noff+1
if (this_precip[t+1] < lower_thresh[lat,lon]):
offoff_count=offoff_count+1
elif (this_precip[t+1] > upper_thresh[lat,lon]):
offon_count=offon_count+1
onon_count = onon_count/float(non)
offoff_count = offoff_count/float(noff)
onoff_count = onoff_count/float(non)
offon_count = offon_count/float(noff)
time_inter = 0.5*((onon_count+offoff_count)-(onoff_count+offon_count))
print('-----> Info: Temporal intermittency measure p(upper|upper): ',onon_count)
print('-----> Info: Temporal intermittency measure p(lower|lower): ',offoff_count)
print('-----> Info: Temporal intermittency measure p(upper|lower): ',offon_count)
print('-----> Info: Temporal intermittency measure p(lower|upper): ',onoff_count)
print('----> Info: Combined temporal intermittency measure: ',time_inter)
data_to_csv = {"Dataset": short_name,
"Temp p(upper|upper)": onon_count,
"Temp p(lower|lower)": offoff_count,
"Temp p(upper|lower)": offon_count,
"Temp p(lower|upper)": onoff_count,
"Temp combined": time_inter}
onon_count=0
onoff_count=0
offon_count=0
offoff_count=0
non=0
noff=0
for lon in range(1,nlon-1,3):
for lat in range(1,nlat-1,3):
for t in range(nt-1):
if (precip.data[t,lat,lon] > upper_thresh[lat,lon]):
onon_count = onon_count + np.sum(np.where(precip.data[t,lat-1:lat+2,lon-1:lon+2] > upper_thresh[lat,lon],1,0))
onoff_count = onoff_count + np.sum(np.where(precip.data[t,lat-1:lat+2,lon-1:lon+2] < lower_thresh[lat,lon],1,0))
non=non+1
if (precip.data[t,lat,lon] < lower_thresh[lat,lon] and precip.data[t,lat,lon] > 1):
offoff_count = offoff_count + np.sum(np.where(precip.data[t,lat-1:lat+2,lon-1:lon+2] < lower_thresh[lat,lon],1,0))
offon_count = offon_count + np.sum(np.where(precip.data[t,lat-1:lat+2,lon-1:lon+2] > upper_thresh[lat,lon],1,0))
noff=noff+1
onon_count = onon_count / float(non*8.0)
onoff_count = onoff_count / float(non*8.0)
offoff_count = offoff_count / float(noff*8.0)
offon_count = offon_count / float(noff*8.0)
space_inter = 0.5*((onon_count+offoff_count)-(onoff_count+offon_count))
print('-----> Info: Spatial intermittency measure p(upper|upper): ',onon_count)
print('-----> Info: Spatial intermittency measure p(lower|lower): ',offoff_count)
print('-----> Info: Spatial intermittency measure p(upper|lower): ',offon_count)
print('-----> Info: Spatial intermittency measure p(lower|upper): ',onoff_count)
print('----> Info: Combined spatial intermittency measure: ',space_inter)
spatial_metrics = {"Sp p(upper|upper)": onon_count,
"Sp p(lower|lower)": offoff_count,
"Sp p(upper|lower)": offon_count,
"Sp p(lower|upper)": onoff_count,
"Sp combined": space_inter}
data_to_csv.update(spatial_metrics)
# Write metrics to file for CMEC driver
if metrics_csv is not None:
field_names = ["Dataset",
"Temp p(upper|upper)",
"Temp p(lower|lower)",
"Temp p(upper|lower)",
"Temp p(lower|upper)",
"Temp combined",
"Sp p(upper|upper)",
"Sp p(lower|lower)",
"Sp p(upper|lower)",
"Sp p(lower|upper)",
"Sp combined"]
if not os.path.exists(metrics_csv):
with open(metrics_csv,"w") as fname:
csv_writer = csv.DictWriter(fname, fieldnames=field_names)
csv_writer.writeheader()
csv_writer.writerow(data_to_csv)
else:
with open(metrics_csv,"a") as fname:
csv_writer = csv.DictWriter(fname, fieldnames=field_names)
csv_writer.writerow(data_to_csv)
data_desc = {
os.path.basename(metrics_csv): {
"longname": os.path.basename(metrics_csv).split(".")[1].replace("_"," "),
"description": "Intermittency metrics organized by model"
}
}
update_output_json("metrics",data_desc,wk_dir)
return (space_inter,time_inter)
def plot_equalarea_corr(distance_correlations,distance_ranges,distance_max,model_dict=None,colors=None,legend_names=None,set_desc=None,legend=True,legend_location='lower left',wk_dir='.',ext='.ps'):
"""
Plots correlations as a function of physical distance from one or several datasets,
using correlation data from compute_equalarea_corr. The output is a line graph.
See Fig. 3a in Klingaman et al. (2017) for an example.
Note that the correlation at the central point (0 km) is not plotted, as this is 1.0 by definition.
Arguments:
* distance_correlations (n_datasets,max_box_distance) or (max_box_distance):
Composite correlations as a function of physical distance, averaged over
all sub-regions, as output from compute_equalarea_corr. If a 2D array, then
the routine assumes that the input contains > 1 sets of composite correlations
from multiple datasets.
* distance_ranges (n_datasets,3,max_box_distance) or (3,max_distance) :
For each bin of physical distance, the minimum, median and maximum distance
of points that fall into that bin, as output from compute_equalarea_corr. If
a 3D array, then the routine assumes that the input contains > 1 sets of
range values from multiple datasets.
* distance_max (n_datasets) or scalar:
The furthest distance bin for which the data in distance_correlations and
distance_ranges is valid, as output from compute_equalarea_corr. If a 1D
array, then the routine assumes that the input contains > 1 set of values
from multiple datasets.
Arguments that may be required (see below):
* model_dict:
The dictionary containing information about this dataset. Required only if plotting
data for one dataset.
* colors (n_datasets):
A list of line colors for each dataset. Required only if plotting data for more than
one dataset.
* legend_names (n_datasets):
A list of legend names for each dataset. Required only if plotting data for more than
one dataset.
* set_desc:
A string containing a description for this set of datasets. Used in output plot filename.
Required only if plotting data for more than one dataset.
Optional arguments:
* legend:
If set to True, include a legend on the graph. Default is True.
* legend_location
Location for the legend on the graph (e.g., 'lower left', 'upper right'). Default is 'lower left'.
"""
print('--> Plotting correlations vs. distance for all models.')
max_box_distance,max_boxes,max_timesteps = parameters()
if distance_correlations.ndim == 1:
if model_dict == None:
raise Exception('You are plotting correlations for only one dataset, but you have not specified a dataset dictionary with the model_dict option to plot_equalarea_corr.')
else:
colors=model_dict['color']
legend_names=model_dict['legend_names']
set_desc=model_dict['name']
nmodels=1
elif distance_correlations.ndim == 2:
nmodels=distance_correlations.shape[0]
if colors == None:
raise Exception('You are plotting correlations for more than one dataset, but you have not specified a list of plot colors with the colors option to plot_equalarea_corr.')
if legend_names == None:
raise Exception('You are plotting correlations for more than one dataset, but you have not specified a list of legend names with the legend_names option to plot_equalarea_corr.')
if set_desc == None:
raise Exception('You are plotting correlations for more than one dataset, but you have not specified a description for this dataset with the set_desc option to plot_equalarea_corr.')
else:
raise Exception('plot_equalarea_corr expects the distance_correlations argument to be either one-dimensional (for only one dataset) or two-dimensional (for multiple datasets).')
plot_name = wk_dir+'/asop_coherence.'+set_desc.replace(" ","_")+'_precip_spatial_correlations'+ext
cfp.setvars(file=plot_name,text_fontsize=20,axis_label_fontsize=20,legend_text_size=18)
cfp.gopen()
if distance_correlations.ndim == 2:
dmax=np.amax(distance_ranges[:,2,:])
xmax=dmax*1.05
cfp.gset(xmin=0,xmax=xmax,ymin=-0.5,ymax=1.0)
for model in range(nmodels):
xpts=distance_ranges[model,1,1:distance_max[model]].flatten()
ypts=distance_correlations[model,1:distance_max[model]].flatten()
if model == nmodels-1 and legend:
cfp.lineplot(x=xpts,y=ypts,linestyle=':',marker='o',color=colors[model],markersize=8,label=legend_names[model],legend_location=legend_location,xticks=np.arange(0,dmax+1,dmax//10),yticks=np.round(np.arange(12)*0.1-0.1,1))
else:
print("dmax "+str(dmax))
print(dmax//10)
cfp.lineplot(x=xpts,y=ypts,linestyle=':',marker='o',color=colors[model],markersize=8,label=legend_names[model],xticks=np.arange(0,dmax+1,dmax//10),yticks=np.round(np.arange(12)*0.1-0.1,1))
for dist in range(1,distance_max[model]):
xpts=[distance_ranges[model,0,dist],distance_ranges[model,2,dist]]
ypts=[distance_correlations[model,dist],distance_correlations[model,dist]]
cfp.plotvars.plot.plot(xpts,ypts,linewidth=2,color=colors[model])
elif distance_correlations.ndim == 1:
dmax=np.amax(distance_ranges[2,:])
xmax=dmax*1.05
cfp.gset(xmin=0,xmax=xmax,ymin=-0.5,ymax=1.0)
xpts=distance_ranges[1,1:distance_max].flatten()
ypts=distance_correlations[1:distance_max].flatten()
if legend:
cfp.lineplot(x=xpts,y=ypts,linestyle=':',marker='o',color=colors,markersize=8,label=legend_names,legend_location=legend_location,
xticks=np.arange(0,dmax+1,dmax//10),yticks=np.round(np.arange(12)*0.1-0.1,1))
else:
cfp.lineplot(x=xpts,y=ypts,linestyle=':',marker='o',color=colors,markersize=8,label=legend_names,legend_location=legend_location,
xticks=np.arange(0,dmax+1,dmax//10),yticks=np.round(np.arange(12)*0.1-0.1,1))
for dist in range(1,distance_max):
xpts=[distance_ranges[0,dist],distance_ranges[2,dist]]
ypts=[distance_correlations[dist],distance_correlations[dist]]
cfp.plotvars.plot.plot(xpts,ypts,linewidth=2,color=colors)
cfp.plotvars.plot.plot([0,xmax],[0,0],linestyle=':',color='black')
# cfp.plotvars.plot.set_xticks(np.arange(0,xmax,max_box_size/10))
cfp.plotvars.plot.set_xticklabels(np.arange(0,dmax+1,dmax//10),fontsize=16)
# cfp.plotvars.plot.set_yticks(np.arange(12)*0.1-0.1)
cfp.plotvars.plot.set_yticklabels(np.round(np.arange(12)*0.1-0.1,1),fontsize=16)
cfp.plotvars.plot.set_xlabel('Distance from central gridpoint (km)',fontsize=20)
cfp.plotvars.plot.set_ylabel('Lag=0 correlation (mean of sub-regions)',fontsize=20)
cfp.gclose()
desc = {os.path.relpath(plot_name,wk_dir): {
"longname": os.path.basename(plot_name).split(".")[1].replace("_"," "),
"description": "Line plot of correlations as a function of physical distance"
}
}
update_output_json("plots",desc,wk_dir)
def plot_autocorr(time_correlations,time_max,dt=None,model_dict=None,colors=None,legend_names=None,set_desc=None,legend=True,legend_location='lower left',wk_dir='.',ext='.ps'):
"""
Plots correlations as a function of physical time from one or several datasets,
using lagged auto-correlation data from compute_autocorr. The output is a line graph.
See Fig. 3b in Klingaman et al. (2017) for an example.
Note that the lag-0 correlation is not plotted, as this is 1.0 by definition.
Arguments:
* time_correlations (n_datasets,max_timesteps) or (max_timesteps):
Composite correlations as a function of physical time, averaged over all points
in the analysis region, as output from compute_autocorr. If a 2D array, then
the routine assumes that the input contains > 1 sets of composite correlations
from multiple datasets.
* time_max (n_datasets) or scalar:
The longest lag for which the data in time_correlations is valid, as output from
compute_autocorr. If a 1D array, then the routine assumes that the input contains
> 1 sets of values from multiple datasets.
Arguments that may be required (see below):
* model_dict:
The dictionary containing information about this dataset. Required only if plotting
data for one dataset.
* dt (n_datasets):
An array containing the temporal sampling frequency for each input dataset. Required
only if plotting data for more than one dataset.
* colors (n_datasets):
A list of line colors for each dataset. Required only if plotting data for more than
one dataset.
* legend_names (n_datasets):
A list of legend names for each dataset. Required only if plotting data for more than
one dataset.
* set_desc:
A string containing a description for this set of datasets. Used in output plot filename.
Required only if plotting data for more than one dataset.
Optional arguments:
* legend:
If set to True, include a legend on the graph. Default is True.
* legend_location
Location for the legend on the graph (e.g., 'lower left', 'upper right'). Default is 'lower left'.
"""
print('--> Plotting correlations vs. time for all models.')
if time_correlations.ndim == 1:
if model_dict == None:
raise Exception('You are plotting correlations for only one dataset, but you have not specified a dataset dictionary with the model_dict option to plot_autocorr.')
else:
colors=model_dict['color']
if legend == True:
legend_names=model_dict['legend_names']
set_desc=model_dict['name']
dt = model_dict['dt']
nmodels=1
elif time_correlations.ndim == 2:
nmodels=time_correlations.shape[0]
if colors == None:
raise Exception('You are plotting correlations for more than one dataset, but you have not specified a list of plot colors with the colors option to plot_autocorr.')
if legend_names == None and legend == True:
raise Exception('You are plotting correlations for more than one dataset, but you have not specified a list of legend names with the legend_names option to plot_autocorr.')
if set_desc == None:
raise Exception('You are plotting correlations for more than one dataset, but you have not specified a description for this dataset with the set_desc option to plot_autocorr.')
if dt is None:
raise Exception('You are plotting correlations for more than one dataset, but you have not specified an array of temporal sampling intervals with the dt option to plot_autocorr.')
else:
raise Exception('plot_autocorr expects the time_correlations argument to be either one-dimensional (for only one dataset) or two-dimensional (for multiple datasets).')
plot_name = wk_dir+'/asop_coherence.'+set_desc.replace(" ","_")+'_precip_temporal_correlations'+ext
cfp.setvars(file=plot_name,text_fontsize=20,axis_label_fontsize=20,legend_text_size=18)
cfp.gopen()
if np.amax(dt) >= 86400:
dt_min = dt/86400.0
t_units='days'
else:
dt_min = dt/60
t_units='minutes'
tmax=np.amax((time_max-1)*dt_min)
xmax=tmax+np.amax(dt_min)*0.5
cfp.gset(xmin=0,xmax=xmax,ymin=-0.5,ymax=1.0)
if time_correlations.ndim == 2:
for model in range(nmodels):
xpts=(np.arange(time_max[model]))*dt_min[model]
ypts=time_correlations[model,0:time_max[model]]
print(xpts,ypts)
if model == nmodels-1 and legend:
cfp.lineplot(x=xpts[1:],y=ypts[1:],linestyle=':',marker='o',color=colors[model],markersize=8,label=legend_names[model],
xticks=np.arange(11)*tmax//10,yticks=np.round(np.arange(12)*0.1-0.1,1),legend_location=legend_location)
else:
cfp.lineplot(x=xpts[1:],y=ypts[1:],linestyle=':',marker='o',color=colors[model],markersize=8,label=legend_names[model],
xticks=np.arange(11)*tmax//10,yticks=np.round(np.arange(12)*0.1-0.1,1))
elif time_correlations.ndim == 1:
xpts=(np.arange(time_max))*dt_min
ypts=time_correlations[0:time_max]
cfp.lineplot(x=xpts[1:],y=ypts[1:],linestyle=':',marker='o',color=colors,markersize=8,label=legend_names,
xticks=np.arange(11)*tmax//10,yticks=np.round(np.arange(12)*0.1-0.1,1))
cfp.plotvars.plot.plot([0,xmax],[0,0],linestyle=':',color='black')
cfp.plotvars.plot.set_xticklabels(np.arange(11)*tmax//10,fontsize=16)
cfp.plotvars.plot.set_yticklabels(np.round(np.arange(12)*0.1-0.1,1),fontsize=16)
cfp.plotvars.plot.set_xlabel('Time ('+t_units+')',fontsize=20)
cfp.plotvars.plot.set_ylabel('Auto-correlation (mean of all points)',fontsize=20)
cfp.gclose()
desc = {os.path.relpath(plot_name,wk_dir): {
"longname": os.path.basename(plot_name).split(".")[1].replace("_"," "),
"description": "Line plot of correlations as a function of physical time using lagged auto-correlation data from compute_autocorr"
}
}
update_output_json("plots",desc,wk_dir)
############################################
#
# Author: Luca Cinquini
#
############################################
"""
Abstract
--------
The wps module of the OWSlib package provides client-side functionality for executing invocations to a remote Web Processing Server.
Disclaimer
----------
PLEASE NOTE: the owslib wps module should be considered in beta state: it has been tested versus only a handful of WPS services (deployed by the USGS, BADC and PML).
More extensive testing is needed and feedback is appreciated.
Usage
-----
The module can be used to execute three types of requests versus a remote WPS endpoint:
a) "GetCapabilities"
- use the method wps.getcapabilities(xml=None)
- the optional keyword argument "xml" may be used to avoid a real live request, and instead read the WPS capabilities document from a cached XML file
b) "DescribeProcess"
- use the method wps.describeprocess(identifier, xml=None)
- identifier is the process identifier, retrieved from the list obtained from a previous "GetCapabilities" invocation
- the optional keyword argument "xml" may be used to avoid a real live request, and instead read the WPS process description document from a cached XML file
c) "Execute"
- use the method wps.execute(identifier, inputs, output=None, request=None, response=None),
which submits the job to the remote WPS server and returns a WPSExecution object that can be used to periodically check the job status until completion
(or error)
- the optional keyword argument "request" may be used to avoid re-building the request XML from input arguments, and instead submit a request from a
pre-made XML file
- alternatively, an "Execute" request can be built from input arguments by supplying the "identifier", "inputs" and "output" arguments to the execute() method.
- "identifier" is the mandatory process identifier
- "inputs" is a dictionary of (key,value) pairs where:
- key is a named input parameter
- value is either a string, or any python object that supports a getXml() method
In particular, a few classes are included in the package to support a FeatureCollection input:
- "WFSFeatureCollection" can be used in conjunction with "WFSQuery" to define a FEATURE_COLLECTION retrieved from a live WFS server.
- "GMLMultiPolygonFeatureCollection" can be used to define one or more polygons of (latitude, longitude) points.
- "output" is an optional output identifier to be included in the ResponseForm section of the request.
- the optional keyword argument "response" may be used to avoid submitting a real live request, and instead read the WPS execution response document
from a cached XML file (for debugging or testing purposes)
- the convenience module function monitorExecution() can be used to periodically check the status of a remote running job, and eventually download the output
either to a named file, or to a file specified by the server.
Examples
--------
The files examples/wps-usgs-script.py, examples/wps-pml-script-1.py and examples/wps-pml-script-2.py contain real-world usage examples
that submit "GetCapabilities", "DescribeProcess" and "Execute" requests to the live USGS and PML servers. To run:
cd examples
python wps-usgs-script.py
python wps-pml-script-1.py
python wps-pml-script-2.py
The file wps-client.py contains a command-line client that can be used to submit a "GetCapabilities", "DescribeProcess" or "Execute"
request to an arbitrary WPS server. For example, you can run it as follows:
cd examples
To print out usage and example invocations: wps-client -help
To execute a (fake) WPS invocation:
wps-client.py -v -u http://cida.usgs.gov/climate/gdp/process/WebProcessingService -r GetCapabilities -x ../tests/USGSCapabilities.xml
The directory tests/ includes several doctest-style files wps_*.txt that show how to interactively submit a
"GetCapabilities", "DescribeProcess" or "Execute" request, without making a live request but rather parsing the response of cached XML response documents. To run:
cd tests
python -m doctest wps_*.txt
(or python -m doctest -v wps_*.txt for verbose output)
Also, the directory tests/ contains several examples of well-formed "Execute" requests:
- The files wps_USGSExecuteRequest*.xml contain requests that can be submitted to the live USGS WPS service.
- The files PMLExecuteRequest*.xml contain requests that can be submitted to the live PML WPS service.
"""
from __future__ import (absolute_import, division, print_function)
from owscapable.etree import etree
from owscapable.ows import DEFAULT_OWS_NAMESPACE, ServiceIdentification, ServiceProvider, OperationsMetadata
from time import sleep
from owscapable.util import (testXMLValue, build_get_url, dump, getTypedValue,
getNamespace, element_to_string, nspath, openURL, nspath_eval, log)
from xml.dom.minidom import parseString
from owscapable.namespaces import Namespaces
# namespace definition
n = Namespaces()
# These static namespaces are DEPRECATED. Please don't use them.
# No great way of printing a deprecation message since they are defined at the file level
WPS_DEFAULT_NAMESPACE = n.get_namespace("wps")
WFS_NAMESPACE = n.get_namespace("wfs")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
DRAW_NAMESPACE = n.get_namespace("draw")
GML_SCHEMA_LOCATION = "http://schemas.opengis.net/gml/3.1.1/base/feature.xsd"
DRAW_SCHEMA_LOCATION = 'http://cida.usgs.gov/climate/derivative/xsd/draw.xsd'
WPS_DEFAULT_SCHEMA_LOCATION = 'http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd'
WPS_DEFAULT_VERSION = '1.0.0'
def get_namespaces():
ns = n.get_namespaces(["ogc","wfs","wps","gml","xsi","xlink"])
ns[None] = n.get_namespace("wps")
ns["ows"] = DEFAULT_OWS_NAMESPACE
return ns
namespaces = get_namespaces()
class IWebProcessingService():
"""
Abstract interface for an OGC Web Processing Service (WPS).
"""
url = property("""URL for the remote WPS server (string).""")
def getcapabilities(**kw):
"""
Makes a GetCapabilities request to the remote WPS server,
returns an XML document wrapped in a python file-like object.
"""
def describeprocess(**kw):
"""
Makes a DescribeProcess request to the remote WPS server,
returns a Process object containing all the process metadata.
"""
def execute(**kw):
"""
Submits an Execute request to the remote WPS server,
returns a WPSExecution object, which can be used to monitor the status of the job, and ultimately retrieve the result.
"""
class IComplexData():
"""
Abstract interface representing complex input object for a WPS request.
"""
def getXml(self):
"""
Method that returns the object data as an XML snippet,
to be inserted into the WPS request document sent to the server.
"""
class WebProcessingService(object):
"""
Class that contains client-side functionality for invoking an OGC Web Processing Service (WPS).
Implements IWebProcessingService.
"""
def __init__(self, url, version=WPS_DEFAULT_VERSION, username=None, password=None, verbose=False, skip_caps=False):
"""
Initialization method resets the object status.
By default it will execute a GetCapabilities invocation to the remote service,
which can be skipped by using skip_caps=True.
"""
# fields passed in from object initializer
self.url = url
self.username = username
self.password = password
self.version = version
self.verbose = verbose
# fields populated by method invocations
self._capabilities = None
self.identification = None
self.provider = None
self.operations=[]
self.processes=[]
if not skip_caps:
self.getcapabilities()
def getcapabilities(self, xml=None):
"""
Method that requests a capabilities document from the remote WPS server and populates this object's metadata.
keyword argument xml: local XML GetCapabilities document, prevents actual HTTP invocation.
"""
# read capabilities document
reader = WPSCapabilitiesReader(version=self.version, verbose=self.verbose)
if xml:
# read from stored XML file
self._capabilities = reader.readFromString(xml)
else:
self._capabilities = reader.readFromUrl(self.url, username=self.username, password=self.password)
log.debug(element_to_string(self._capabilities))
# populate the capabilities metadata objects from the XML tree
self._parseCapabilitiesMetadata(self._capabilities)
def describeprocess(self, identifier, xml=None):
"""
Requests a process document from a WPS service and populates the process metadata.
Returns the process object.
"""
# read capabilities document
reader = WPSDescribeProcessReader(version=self.version, verbose=self.verbose)
if xml:
# read from stored XML file
rootElement = reader.readFromString(xml)
else:
# read from server
rootElement = reader.readFromUrl(self.url, identifier)
log.info(element_to_string(rootElement))
# build metadata objects
return self._parseProcessMetadata(rootElement)
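# Example (comment-only sketch; the identifier is the USGS algorithm referenced in the
# buildRequest template below):
#   process = wps.describeprocess('gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm')
#   print(process.identifier)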
def execute(self, identifier, inputs, output=None, request=None, response=None):
"""
Submits a WPS process execution request.
Returns a WPSExecution object, which can be used to monitor the status of the job, and ultimately retrieve the result.
identifier: the requested process identifier
inputs: list of process inputs as (key, value) tuples (where value is either a string for LiteralData, or an object for ComplexData)
output: optional identifier for process output reference (if not provided, output will be embedded in the response)
request: optional pre-built XML request document, prevents building of request from other arguments
response: optional pre-built XML response document, prevents submission of request to live WPS server
"""
# instantiate a WPSExecution object
log.info('Executing WPS request...')
execution = WPSExecution(version=self.version, url=self.url, username=self.username, password=self.password, verbose=self.verbose)
# build XML request from parameters
if request is None:
requestElement = execution.buildRequest(identifier, inputs, output)
request = etree.tostring( requestElement )
execution.request = request
log.debug(request)
# submit the request to the live server
if response is None:
response = execution.submitRequest(request)
else:
response = etree.fromstring(response)
log.debug(etree.tostring(response))
# parse response
execution.parseResponse(response)
return execution
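# Example inputs for execute() (comment-only sketch; the keys shown are illustrative):
#   inputs = [('DATASET_URI', 'dods://example.org/data.nc'),   # LiteralData as a plain string
#             ('FEATURE_COLLECTION', featureCollection)]       # ComplexData object exposing getXml()
#   execution = wps.execute(identifier, inputs, output='OUTPUT')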
def _parseProcessMetadata(self, rootElement):
"""
Method to parse a <ProcessDescriptions> XML element and return the constructed Process object
"""
processDescriptionElement = rootElement.find( 'ProcessDescription' )
process = Process(processDescriptionElement, verbose=self.verbose)
# replace an existing process in the object metadata, if one with the same identifier already exists
found = False
for n, p in enumerate(self.processes):
if p.identifier==process.identifier:
self.processes[n]=process
found = True
# otherwise add it
if not found:
self.processes.append(process)
return process
def _parseCapabilitiesMetadata(self, root):
''' Sets up capabilities metadata objects '''
# use the WPS namespace defined in the document root
wpsns = getNamespace(root)
# loop over children WITHOUT requiring a specific namespace
for element in root:
# this element's namespace
ns = getNamespace(element)
# <ows:ServiceIdentification> metadata
if element.tag.endswith('ServiceIdentification'):
self.identification=ServiceIdentification(element, namespace=ns)
if self.verbose==True:
dump(self.identification)
# <ows:ServiceProvider> metadata
elif element.tag.endswith('ServiceProvider'):
self.provider=ServiceProvider(element, namespace=ns)
if self.verbose==True:
dump(self.provider)
# <ns0:OperationsMetadata xmlns:ns0="http://www.opengeospatial.net/ows">
# <ns0:Operation name="GetCapabilities">
# <ns0:DCP>
# <ns0:HTTP>
# <ns0:Get xlink:href="http://ceda-wps2.badc.rl.ac.uk/wps?" xmlns:xlink="http://www.w3.org/1999/xlink" />
# </ns0:HTTP>
# </ns0:DCP>
# </ns0:Operation>
# ........
# </ns0:OperationsMetadata>
elif element.tag.endswith('OperationsMetadata'):
for child in element.findall( nspath('Operation', ns=ns) ):
self.operations.append( OperationsMetadata(child, namespace=ns) )
if self.verbose==True:
dump(self.operations[-1])
# <wps:ProcessOfferings>
# <wps:Process ns0:processVersion="1.0.0">
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Identifier>
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Title>
# </wps:Process>
# ......
# </wps:ProcessOfferings>
elif element.tag.endswith('ProcessOfferings'):
for child in element.findall( nspath('Process', ns=ns) ):
p = Process(child, verbose=self.verbose)
self.processes.append(p)
if self.verbose==True:
dump(self.processes[-1])
class WPSReader(object):
"""
Superclass for reading a WPS document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
self.version = version
self.verbose = verbose
def _readFromUrl(self, url, data, method='Get', username=None, password=None):
"""
Method to get and parse a WPS document, returning an elementtree instance.
url: WPS service base url.
data: GET: dictionary of HTTP (key, value) parameter pairs, POST: XML document to post
username, password: optional user credentials
"""
if method == 'Get':
# full HTTP request url
request_url = build_get_url(url, data)
log.debug(request_url)
# split URL into base url and query string to use utility function
spliturl=request_url.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username=username, password=password)
return etree.fromstring(u.read())
elif method == 'Post':
u = openURL(url, data, method='Post', username = username, password = password)
return etree.fromstring(u.read())
else:
raise Exception("Unrecognized HTTP method: %s" % method)
def readFromString(self, string):
"""
Method to read a WPS GetCapabilities document from an XML string.
"""
if not isinstance(string, str):
raise ValueError("Input must be of type string, not %s" % type(string))
return etree.fromstring(string)
class WPSCapabilitiesReader(WPSReader):
"""
Utility class that reads and parses a WPS GetCapabilities document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
# superclass initializer
super(WPSCapabilitiesReader,self).__init__(version=version, verbose=verbose)
def readFromUrl(self, url, username=None, password=None):
"""
Method to get and parse a WPS capabilities document, returning an elementtree instance.
url: WPS service base url, to which is appended the HTTP parameters: service, version, and request.
username, password: optional user credentials
"""
return self._readFromUrl(url,
{'service':'WPS', 'request':'GetCapabilities', 'version':self.version},
username=username, password=password)
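# Hedged usage sketch (not part of the original module): how WPSCapabilitiesReader
# might be used to fetch and parse a GetCapabilities document. The service URL is a
# hypothetical placeholder.
def _example_read_capabilities(url='http://example.com/wps'):
    reader = WPSCapabilitiesReader(version='1.0.0')
    # returns the parsed etree root of the GetCapabilities response
    return reader.readFromUrl(url)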
class WPSDescribeProcessReader(WPSReader):
"""
Class that reads and parses a WPS DescribeProcess document into an etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
# superclass initializer
super(WPSDescribeProcessReader,self).__init__(version=version, verbose=verbose)
def readFromUrl(self, url, identifier, username=None, password=None):
"""
Reads a WPS DescribeProcess document from a remote service and returns the XML etree object
url: WPS service base url, to which the HTTP parameters 'service', 'version', 'request', and 'identifier' are appended.
"""
return self._readFromUrl(url,
{'service':'WPS', 'request':'DescribeProcess', 'version':self.version, 'identifier':identifier},
username=username, password=password)
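# Hedged usage sketch (not part of the original module): fetching a DescribeProcess
# document for a single process. The URL and process identifier are hypothetical
# placeholders.
def _example_describe_process(url='http://example.com/wps',
                              identifier='example.process.Identifier'):
    reader = WPSDescribeProcessReader()
    # returns the parsed etree root of the DescribeProcess response
    return reader.readFromUrl(url, identifier)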
class WPSExecuteReader(WPSReader):
"""
Class that reads and parses a WPS Execute response document into an etree infoset.
"""
def __init__(self, verbose=False):
# superclass initializer
super(WPSExecuteReader,self).__init__(verbose=verbose)
def readFromUrl(self, url, data={}, method='Get', username=None, password=None):
"""
Reads a WPS status document from a remote service and returns the XML etree object.
url: the URL to submit the GET/POST request to.
"""
return self._readFromUrl(url, data, method, username=username, password=password)
class WPSExecution():
"""
Class that represents a single WPS process executed on a remote WPS service.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, url=None, username=None, password=None, verbose=False):
# initialize fields
self.url = url
self.version = version
self.username = username
self.password = password
self.verbose = verbose
# request document
self.request = None
# last response document
self.response = None
# status fields retrieved from the response documents
self.process = None
self.serviceInstance = None
self.status = None
self.percentCompleted = 0
self.statusMessage = None
self.errors = []
self.statusLocation = None
self.dataInputs=[]
self.processOutputs=[]
def buildRequest(self, identifier, inputs=[], output=None):
"""
Method to build a WPS process request.
identifier: the requested process identifier
inputs: array of input arguments for the process.
- LiteralData inputs are expressed as simple (key, value) tuples, where key is the input identifier and value is the literal value
- ComplexData inputs are expressed as (key, object) tuples, where key is the input identifier,
and the object must contain a 'getXml()' method that returns an XML infoset to be included in the WPS request
output: optional identifier if process output is to be returned as a hyperlink reference
"""
#<wps:Execute xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1"
# xmlns:xlink="http://www.w3.org/1999/xlink"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# service="WPS"
# version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd">
root = etree.Element(nspath_eval('wps:Execute', namespaces))
root.set('service', 'WPS')
root.set('version', WPS_DEFAULT_VERSION)
root.set(nspath_eval('xsi:schemaLocation', namespaces), '%s %s' % (namespaces['wps'], WPS_DEFAULT_SCHEMA_LOCATION) )
# <ows:Identifier>gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm</ows:Identifier>
identifierElement = etree.SubElement(root, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = identifier
# <wps:DataInputs>
dataInputsElement = etree.SubElement(root, nspath_eval('wps:DataInputs', namespaces))
for (key,val) in inputs:
inputElement = etree.SubElement(dataInputsElement, nspath_eval('wps:Input', namespaces))
identifierElement = etree.SubElement(inputElement, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = key
# Literal data
# <wps:Input>
# <ows:Identifier>DATASET_URI</ows:Identifier>
# <wps:Data>
# <wps:LiteralData>dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml</wps:LiteralData>
# </wps:Data>
# </wps:Input>
if isinstance(val, str):
dataElement = etree.SubElement(inputElement, nspath_eval('wps:Data', namespaces))
literalDataElement = etree.SubElement(dataElement, nspath_eval('wps:LiteralData', namespaces))
literalDataElement.text = val
# Complex data
# <wps:Input>
# <ows:Identifier>FEATURE_COLLECTION</ows:Identifier>
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd">
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
# </wps:Input>
else:
inputElement.append( val.getXml() )
# <wps:ResponseForm>
# <wps:ResponseDocument storeExecuteResponse="true" status="true">
# <wps:Output asReference="true">
# <ows:Identifier>OUTPUT</ows:Identifier>
# </wps:Output>
# </wps:ResponseDocument>
# </wps:ResponseForm>
if output is not None:
responseFormElement = etree.SubElement(root, nspath_eval('wps:ResponseForm', namespaces))
responseDocumentElement = etree.SubElement(responseFormElement, nspath_eval('wps:ResponseDocument', namespaces),
attrib={'storeExecuteResponse':'true', 'status':'true'} )
if isinstance(output, str):
self._add_output(responseDocumentElement, output, asReference=True)
elif isinstance(output, list):
for (identifier,as_reference) in output:
self._add_output(responseDocumentElement, identifier, asReference=as_reference)
else:
raise Exception('output parameter is neither string nor list. output=%s' % output)
return root
def _add_output(self, element, identifier, asReference=False):
outputElement = etree.SubElement(element, nspath_eval('wps:Output', namespaces),
attrib={'asReference':str(asReference).lower()} )
outputIdentifierElement = etree.SubElement(outputElement, nspath_eval('ows:Identifier', namespaces))
outputIdentifierElement.text = identifier
# wait for 60 seconds by default
def checkStatus(self, url=None, response=None, sleepSecs=60):
"""
Method to check the status of a job execution.
In the process, this method will update the object's 'response' attribute.
url: optional 'statusLocation' URL retrieved from a previous WPS Execute response document.
If not provided, the current 'statusLocation' URL will be used.
sleepSecs: number of seconds to sleep before returning control to the caller.
"""
reader = WPSExecuteReader(verbose=self.verbose)
if response is None:
# override status location
if url is not None:
self.statusLocation = url
log.info('\nChecking execution status... (location=%s)' % self.statusLocation)
response = reader.readFromUrl(self.statusLocation, username=self.username, password=self.password)
else:
response = reader.readFromString(response)
# store latest response
self.response = etree.tostring(response)
log.debug(self.response)
self.parseResponse(response)
# sleep given number of seconds
if self.isComplete()==False:
log.info('Sleeping %d seconds...' % sleepSecs)
sleep(sleepSecs)
def getStatus(self):
return self.status
def isComplete(self):
if (self.status=='ProcessSucceeded' or self.status=='ProcessFailed' or self.status=='Exception'):
return True
elif (self.status=='ProcessStarted'):
return False
elif (self.status=='ProcessAccepted' or self.status=='ProcessPaused'):
return False
else:
raise Exception('Unknown process execution status: %s' % self.status)
def isSucceded(self):
if self.status=='ProcessSucceeded':
return True
else:
return False
def isNotComplete(self):
return not self.isComplete()
def getOutput(self, filepath=None):
"""
Method to write the outputs of a WPS process to a file:
either retrieves the referenced files from the server, or writes out the content of the response's embedded output.
filepath: optional path to the output file, otherwise a file will be created in the local directory with the name assigned by the server,
or default name 'wps.out' for embedded output.
"""
if self.isSucceded():
content = ''
for output in self.processOutputs:
output_content = output.retrieveData(self.username, self.password)
# ExecuteResponse contains reference to server-side output
if output_content != "":
content = content + output_content
if filepath is None:
filepath = output.fileName
# ExecuteResponse contains embedded output
if len(output.data)>0:
if filepath is None:
filepath = 'wps.out'
for data in output.data:
content = content + data
# write out content
if content != '':
out = open(filepath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' %filepath)
else:
raise Exception("Execution not successfully completed: status=%s" % self.status)
def submitRequest(self, request):
"""
Submits a WPS Execute document to a remote service, returns the XML response document from the server.
This method will save the request document and the first returned response document.
request: the XML request document to be submitted as POST to the server.
"""
self.request = request
reader = WPSExecuteReader(verbose=self.verbose)
response = reader.readFromUrl(self.url, request, method='Post', username=self.username, password=self.password)
self.response = response
return response
def parseResponse(self, response):
"""
Method to parse a WPS response document
"""
rootTag = response.tag.split('}')[1]
# <ns0:ExecuteResponse>
if rootTag == 'ExecuteResponse':
self._parseExecuteResponse(response)
# <ows:ExceptionReport>
elif rootTag == 'ExceptionReport':
self._parseExceptionReport(response)
else:
log.debug('Unknown Response')
# log status, errors
log.info('Execution status=%s' % self.status)
log.info('Percent completed=%s' % self.percentCompleted)
log.info('Status message=%s' % self.statusMessage)
for error in self.errors:
dump(error)
def _parseExceptionReport(self, root):
"""
Method to parse a WPS ExceptionReport document and populate this object's metadata.
"""
# set exception status, unless set already
if self.status is None:
self.status = "Exception"
for exceptionEl in root.findall( nspath('Exception', ns=namespaces['ows']) ):
self.errors.append( WPSException(exceptionEl) )
def _parseExecuteResponse(self, root):
"""
Method to parse a WPS ExecuteResponse response document and populate this object's metadata.
"""
# retrieve WPS namespace directly from root element
wpsns = getNamespace(root)
self.serviceInstance = root.get( 'serviceInstance' )
self.statusLocation = root.get( 'statusLocation' )
# <ns0:Status creationTime="2011-11-09T14:19:50Z">
# <ns0:ProcessSucceeded>PyWPS Process v.net.path successfully calculated</ns0:ProcessSucceeded>
# </ns0:Status>
# OR
# <ns0:Status creationTime="2011-11-07T08:26:44.359-06:00">
# <ns0:ProcessFailed>
# <ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Exception>
# <ows:ExceptionText>Attribute null not found in feature collection</ows:ExceptionText>
# </ows:Exception>
# </ows:ExceptionReport>
# </ns0:ProcessFailed>
# </ns0:Status>
statusEl = root.find( nspath('Status/*', ns=wpsns) )
self.status = statusEl.tag.split('}')[1]
# get progress info
try:
percentCompleted = int(statusEl.get('percentCompleted'))
self.percentCompleted = percentCompleted
except (TypeError, ValueError):
pass
# get status message
self.statusMessage = statusEl.text
# exceptions ?
for element in statusEl:
if element.tag.endswith('ExceptionReport'):
self._parseExceptionReport(element)
self.process = Process(root.find(nspath('Process', ns=wpsns)), verbose=self.verbose)
#<wps:DataInputs xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xlink="http://www.w3.org/1999/xlink">
for inputElement in root.findall( nspath('DataInputs/Input', ns=wpsns) ):
self.dataInputs.append( Input(inputElement) )
if self.verbose==True:
dump(self.dataInputs[-1])
# <ns:ProcessOutputs>
# xmlns:ns="http://www.opengis.net/wps/1.0.0"
for outputElement in root.findall( nspath('ProcessOutputs/Output', ns=wpsns) ):
self.processOutputs.append( Output(outputElement) )
if self.verbose==True:
dump(self.processOutputs[-1])
class ComplexData(object):
"""
Class that represents a ComplexData element in a WPS document
"""
def __init__(self, mimeType=None, encoding=None, schema=None):
self.mimeType = mimeType
self.encoding = encoding
self.schema = schema
class InputOutput(object):
"""
Superclass of a WPS input or output data object.
"""
def __init__(self, element):
# loop over sub-elements without requiring a specific namespace
for subElement in element:
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">SUMMARIZE_TIMESTEP</ows:Identifier>
if subElement.tag.endswith('Identifier'):
self.identifier = testXMLValue( subElement )
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Summarize Timestep</ows:Title>
elif subElement.tag.endswith('Title'):
self.title = testXMLValue( subElement )
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">If selected, processing output will include columns with summarized statistics for all feature attribute values for each timestep</ows:Abstract>
elif subElement.tag.endswith('Abstract'):
self.abstract = testXMLValue( subElement )
self.allowedValues = []
self.supportedValues = []
self.defaultValue = None
self.dataType = None
self.anyValue = False
def _parseData(self, element):
"""
Method to parse a "Data" element
"""
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927
# </ns0:ComplexData>
# </ns0:Data>
#nspath('Data', ns=WPS_NAMESPACE)
complexDataElement = element.find( nspath('ComplexData', ns=getNamespace(element)) )
if complexDataElement is not None:
self.dataType = "ComplexData"
def _parseLiteralData(self, element, literalElementName):
"""
Method to parse the LiteralData element.
"""
# <LiteralData>
# <ows:DataType ows:reference="xs:string" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AllowedValues xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Value>COMMA</ows:Value>
# <ows:Value>TAB</ows:Value>
# <ows:Value>SPACE</ows:Value>
# </ows:AllowedValues>
# <DefaultValue>COMMA</DefaultValue>
# </LiteralData>
# <LiteralData>
# <ows:DataType ows:reference="xs:anyURI" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AnyValue xmlns:ows="http://www.opengis.net/ows/1.1" />
# </LiteralData>
literalDataElement = element.find( literalElementName )
if literalDataElement is not None:
self.dataType = 'LiteralData'
for subElement in literalDataElement:
subns = getNamespace(subElement)
if subElement.tag.endswith('DataType'):
self.dataType = subElement.get( nspath("reference", ns=subns) ).split(':')[1]
elif subElement.tag.endswith('AllowedValues'):
for value in subElement.findall( nspath('Value', ns=subns) ):
self.allowedValues.append( getTypedValue(self.dataType, value.text) )
elif subElement.tag.endswith('DefaultValue'):
self.defaultValue = getTypedValue(self.dataType, subElement.text)
elif subElement.tag.endswith('AnyValue'):
self.anyValue = True
def _parseComplexData(self, element, complexDataElementName):
"""
Method to parse a ComplexData or ComplexOutput element.
"""
# <ComplexData>
# <Default>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# </Default>
# <Supported>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.1.1/feature.xsd</Schema>
# </Format>
# </Supported>
# </ComplexData>
# OR
# <ComplexOutput defaultEncoding="UTF-8" defaultFormat="text/XML" defaultSchema="NONE">
# <SupportedComplexData>
# <Format>text/XML</Format>
# <Encoding>UTF-8</Encoding>
# <Schema>NONE</Schema>
# </SupportedComplexData>
# </ComplexOutput>
complexDataElement = element.find( complexDataElementName )
if complexDataElement is not None:
self.dataType = "ComplexData"
for supportedComplexDataElement in complexDataElement.findall( 'SupportedComplexData' ):
self.supportedValues.append( ComplexData( mimeType=testXMLValue( supportedComplexDataElement.find( 'Format' ) ),
encoding=testXMLValue( supportedComplexDataElement.find( 'Encoding' ) ),
schema=testXMLValue( supportedComplexDataElement.find( 'Schema' ) )
)
)
for formatElement in complexDataElement.findall( 'Supported/Format'):
self.supportedValues.append( ComplexData( mimeType=testXMLValue( formatElement.find( 'MimeType' ) ),
encoding=testXMLValue( formatElement.find( 'Encoding' ) ),
schema=testXMLValue( formatElement.find( 'Schema' ) )
)
)
defaultFormatElement = complexDataElement.find( 'Default/Format' )
if defaultFormatElement is not None:
self.defaultValue = ComplexData( mimeType=testXMLValue( defaultFormatElement.find( 'MimeType' ) ),
encoding=testXMLValue( defaultFormatElement.find( 'Encoding' ) ),
schema=testXMLValue( defaultFormatElement.find( 'Schema' ) )
)
class Input(InputOutput):
"""
Class that represents a WPS process input.
"""
def __init__(self, inputElement):
# superclass initializer
super(Input,self).__init__(inputElement)
# <Input maxOccurs="1" minOccurs="0">
# OR
# <MinimumOccurs>1</MinimumOccurs>
self.minOccurs = -1
if inputElement.get("minOccurs") is not None:
self.minOccurs = int( inputElement.get("minOccurs") )
if inputElement.find('MinimumOccurs') is not None:
self.minOccurs = int( testXMLValue( inputElement.find('MinimumOccurs') ) )
self.maxOccurs = -1
if inputElement.get("maxOccurs") is not None:
self.maxOccurs = int( inputElement.get("maxOccurs") )
if inputElement.find('MaximumOccurs') is not None:
self.maxOccurs = int( testXMLValue( inputElement.find('MaximumOccurs') ) )
# <LiteralData>
self._parseLiteralData(inputElement, 'LiteralData')
# <ComplexData>
self._parseComplexData(inputElement, 'ComplexData')
class Output(InputOutput):
"""
Class that represents a WPS process output.
"""
def __init__(self, outputElement):
# superclass initializer
super(Output,self).__init__(outputElement)
self.reference = None
self.mimeType = None
self.data = []
self.fileName = None
self.filePath = None
# extract wps namespace from outputElement itself
wpsns = getNamespace(outputElement)
# <ns:Reference encoding="UTF-8" mimeType="text/csv"
# href="http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e" />
referenceElement = outputElement.find( nspath('Reference', ns=wpsns) )
if referenceElement is not None:
self.reference = referenceElement.get('href')
self.mimeType = referenceElement.get('mimeType')
# <LiteralOutput>
self._parseLiteralData(outputElement, 'LiteralOutput')
# <ComplexData> or <ComplexOutput>
self._parseComplexData(outputElement, 'ComplexOutput')
# <Data>
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927
# </ns0:ComplexData>
# </ns0:Data>
# OR:
# <ns0:Data>
# <ns0:ComplexData encoding="UTF-8" mimeType="text/xml" schema="http://schemas.opengis.net/gml/2.1.2/feature.xsd">
# <ns3:FeatureCollection xsi:schemaLocation="http://ogr.maptools.org/ output_0n7ij9D.xsd" xmlns:ns3="http://ogr.maptools.org/">
# <gml:boundedBy xmlns:gml="http://www.opengis.net/gml">
# <gml:Box>
# <gml:coord><gml:X>-960123.1421801626</gml:X><gml:Y>4665723.56559387</gml:Y></gml:coord>
# <gml:coord><gml:X>-101288.6510608822</gml:X><gml:Y>5108200.011823481</gml:Y></gml:coord>
# </gml:Box>
# </gml:boundedBy>
# <gml:featureMember xmlns:gml="http://www.opengis.net/gml">
# <ns3:output fid="F0">
# <ns3:geometryProperty><gml:LineString><gml:coordinates>-960123.142180162365548,4665723.565593870356679,0 -960123.142180162365548,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -711230.141176006174646,4710278.48552671354264,0 -711230.141176006174646,4710278.48552671354264,0 -623656.677859728806652,4848552.374973464757204,0 -623656.677859728806652,4848552.374973464757204,0 -410100.337491964863148,4923834.82589447684586,0 -410100.337491964863148,4923834.82589447684586,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0</gml:coordinates></gml:LineString></ns3:geometryProperty>
# <ns3:cat>1</ns3:cat>
# <ns3:id>1</ns3:id>
# <ns3:fcat>0</ns3:fcat>
# <ns3:tcat>0</ns3:tcat>
# <ns3:sp>0</ns3:sp>
# <ns3:cost>1002619.181</ns3:cost>
# <ns3:fdist>0</ns3:fdist>
# <ns3:tdist>0</ns3:tdist>
# </ns3:output>
# </gml:featureMember>
# </ns3:FeatureCollection>
# </ns0:ComplexData>
# </ns0:Data>
dataElement = outputElement.find( nspath('Data', ns=wpsns) )
if dataElement is not None:
complexDataElement = dataElement.find( nspath('ComplexData', ns=wpsns) )
if complexDataElement is not None:
self.dataType = "ComplexData"
self.mimeType = complexDataElement.get('mimeType')
if complexDataElement.text is not None and complexDataElement.text.strip() != '':
self.data.append(complexDataElement.text.strip())
for child in complexDataElement:
self.data.append(etree.tostring(child))
literalDataElement = dataElement.find( nspath('LiteralData', ns=wpsns) )
if literalDataElement is not None:
self.dataType = literalDataElement.get('dataType')
if literalDataElement.text is not None and literalDataElement.text.strip() != '':
self.data.append(literalDataElement.text.strip())
def retrieveData(self, username=None, password=None):
"""
Method to retrieve data from server-side reference:
returns "" if the reference is not known.
username, password: credentials to access the remote WPS server
"""
url = self.reference
if url is None:
return ""
# a) 'http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e'
# b) 'http://rsg.pml.ac.uk/wps/wpsoutputs/outputImage-11294Bd6l2a.tif'
log.info('Output URL=%s' % url)
if '?' in url:
spliturl=url.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username = username, password = password)
# extract output filepath from URL query string
self.fileName = spliturl[1].split('=')[1]
else:
u = openURL(url, '', method='Get', username = username, password = password)
# extract output filepath from base URL
self.fileName = url.split('/')[-1]
return u.read()
def writeToDisk(self, path=None, username=None, password=None):
"""
Method to write an output of a WPS process to disk:
it either retrieves the referenced file from the server, or writes out the content of the response's embedded output.
path: optional directory prefix for the output file; otherwise a file will be created in the local directory with the name assigned by the server.
username, password: credentials to access the remote WPS server
"""
# Check if ExecuteResponse contains reference to server-side output
content = self.retrieveData(username, password)
# ExecuteResponse contains embedded output
if content == "" and len(self.data) > 0:
self.fileName = self.identifier
for data in self.data:
content = content + data
# write out content
if content != "":
if self.fileName == "":
self.fileName = self.identifier
self.filePath = (path or '') + self.fileName
out = open(self.filePath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' %self.filePath)
class WPSException:
"""
Class representing an exception raised by a WPS.
"""
def __init__(self, root):
self.code = root.attrib.get("exceptionCode", None)
self.locator = root.attrib.get("locator", None)
textEl = root.find( nspath('ExceptionText', ns=getNamespace(root)) )
if textEl is not None:
self.text = textEl.text
else:
self.text = ""
class Process(object):
"""
Class that represents a WPS process.
"""
def __init__(self, elem, verbose=False):
""" Initialization method extracts all available metadata from an XML document (passed in as etree object) """
# <ns0:ProcessDescriptions service="WPS" version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsDescribeProcess_response.xsd"
# xml:lang="en-US" xmlns:ns0="http://www.opengis.net/wps/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
# OR:
# <ns0:Process ns0:processVersion="1.0.0">
self._root = elem
self.verbose = verbose
wpsns = getNamespace(elem)
# <ProcessDescription statusSupported="true" storeSupported="true" ns0:processVersion="1.0.0">
self.processVersion = elem.get( nspath('processVersion', ns=wpsns) )
self.statusSupported = elem.get("statusSupported") == "true"
self.storeSupported = elem.get("storeSupported") == "true"
for child in elem:
# this element's namespace
ns = getNamespace(child)
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm</ows:Identifier>
if child.tag.endswith('Identifier'):
self.identifier = testXMLValue( child )
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Feature Weighted Grid Statistics</ows:Title>
elif child.tag.endswith('Title'):
self.title = testXMLValue( child )
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">This algorithm generates area weighted statistics of a gridded dataset for a set of vector polygon features. Using the bounding-box that encloses the feature data and the time range, if provided, a subset of the gridded dataset is requested from the remote gridded data server. Polygon representations are generated for cells in the retrieved grid. The polygon grid-cell representations are then projected to the feature data coordinate reference system. The grid-cells are used to calculate per grid-cell feature coverage fractions. Area-weighted statistics are then calculated for each feature using the grid values and fractions as weights. If the gridded dataset has a time range the last step is repeated for each time step within the time range or all time steps if a time range was not supplied.</ows:Abstract>
elif child.tag.endswith('Abstract'):
self.abstract = testXMLValue( child )
if self.verbose==True:
dump(self)
# <DataInputs>
self.dataInputs = []
for inputElement in elem.findall( 'DataInputs/Input' ):
self.dataInputs.append( Input(inputElement) )
if self.verbose==True:
dump(self.dataInputs[-1], prefix='\tInput: ')
# <ProcessOutputs>
self.processOutputs = []
for outputElement in elem.findall( 'ProcessOutputs/Output' ):
self.processOutputs.append( Output(outputElement) )
if self.verbose==True:
dump(self.processOutputs[-1], prefix='\tOutput: ')
class FeatureCollection():
'''
Base class to represent a Feature Collection used as input to a WPS request.
The method getXml() is invoked by the WPS execute() method to build the WPS request.
All subclasses must implement the getXml() method to provide their specific XML.
Implements IComplexData.
'''
def __init__(self):
pass
def getXml(self):
raise NotImplementedError
class WFSFeatureCollection(FeatureCollection):
'''
FeatureCollection specified by a WFS query.
All subclasses must implement the getQuery() method to provide the specific query portion of the XML.
'''
def __init__(self, wfsUrl, wfsQuery):
'''
wfsUrl: the WFS service URL
example: wfsUrl = "http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs"
wfsQuery : a WFS query instance
'''
self.url = wfsUrl
self.query = wfsQuery
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd">
# .......
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
def getXml(self):
root = etree.Element(nspath_eval('wps:Reference', namespaces), attrib = { nspath_eval("xlink:href",namespaces) : self.url} )
bodyElement = etree.SubElement(root, nspath_eval('wps:Body', namespaces))
getFeatureElement = etree.SubElement(bodyElement, nspath_eval('wfs:GetFeature', namespaces),
attrib = { "service":"WFS",
"version":"1.1.0",
"outputFormat":"text/xml; subtype=gml/3.1.1",
nspath_eval("xsi:schemaLocation",namespaces):"%s %s" % (namespaces['wfs'], '../wfs/1.1.0/WFS.xsd')})
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
getFeatureElement.append( self.query.getXml() )
return root
class WFSQuery():
'''
Class representing a WFS query, for insertion into a WFSFeatureCollection instance.
Implements IComplexData.
'''
def __init__(self, typeName, propertyNames=[], filters=[]):
self.typeName = typeName
self.propertyNames = propertyNames
self.filters = filters
def getXml(self):
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
queryElement = etree.Element(nspath_eval('wfs:Query', namespaces), attrib = { "typeName":self.typeName })
for propertyName in self.propertyNames:
propertyNameElement = etree.SubElement(queryElement, nspath_eval('wfs:PropertyName', namespaces))
propertyNameElement.text = propertyName
if len(self.filters)>0:
filterElement = etree.SubElement(queryElement, nspath_eval('ogc:Filter', namespaces))
for filter in self.filters:
gmlObjectIdElement = etree.SubElement(filterElement, nspath_eval('ogc:GmlObjectId', namespaces),
attrib={nspath_eval('gml:id', namespaces):filter})
return queryElement
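# Hedged usage sketch (not part of the original module): building the WFS-backed
# ComplexData input illustrated in the comments above. The WFS URL is a hypothetical
# placeholder; the type name, property names and feature id mirror the sample XML.
def _example_wfs_feature_collection():
    query = WFSQuery('sample:CONUS_States',
                     propertyNames=['the_geom', 'STATE'],
                     filters=['CONUS_States.508'])
    featureCollection = WFSFeatureCollection('http://example.com/geoserver/wfs', query)
    # typically passed as the value of a ComplexData input tuple, e.g.
    # ('FEATURE_COLLECTION', featureCollection); buildRequest() calls its getXml()
    return featureCollection.getXml()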
class GMLMultiPolygonFeatureCollection(FeatureCollection):
'''
Class that represents a FeatureCollection defined as a GML multi-polygon.
'''
def __init__(self, polygons):
'''
Initializer accepts an array of polygons, where each polygon is an array of (lat,lon) tuples.
Example: polygons = [ [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418), (-101.2363, 39.5273), (-102.8184, 39.5273)],
[(-92.8184, 39.5273), (-92.8184, 37.418), (-91.2363, 37.418), (-91.2363, 39.5273), (-92.8184, 39.5273)] ]
'''
self.polygons = polygons
def getXml(self):
'''
<wps:Data>
<wps:ComplexData mimeType="text/xml" encoding="UTF-8"
schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
<gml:featureMembers xmlns:ogc="http://www.opengis.net/ogc"
xmlns:draw="gov.usgs.cida.gdp.draw" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ows="http://www.opengis.net/ows" xmlns:gml="http://www.opengis.net/gml"
xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="gov.usgs.cida.gdp.draw http://cida.usgs.gov/climate/derivative/xsd/draw.xsd">
<gml:box gml:id="box.1">
<gml:the_geom>
<gml:MultiPolygon srsDimension="2"
srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">
<gml:polygonMember>
<gml:Polygon>
<gml:exterior>
<gml:LinearRing>
<gml:posList>-102.8184 39.5273 -102.8184 37.418 -101.2363 37.418 -101.2363 39.5273 -102.8184 39.5273</gml:posList>
</gml:LinearRing>
</gml:exterior>
</gml:Polygon>
</gml:polygonMember>
</gml:MultiPolygon>
</gml:the_geom>
<gml:ID>0</gml:ID>
</gml:box>
</gml:featureMembers>
</wps:ComplexData>
</wps:Data>
'''
dataElement = etree.Element(nspath_eval('wps:Data', namespaces))
complexDataElement = etree.SubElement(dataElement, nspath_eval('wps:ComplexData', namespaces),
attrib={"mimeType":"text/xml", "encoding":"UTF-8", "schema":GML_SCHEMA_LOCATION} )
featureMembersElement = etree.SubElement(complexDataElement, nspath_eval('gml:featureMembers', namespaces),
attrib={ nspath_eval("xsi:schemaLocation",namespaces):"%s %s" % (DRAW_NAMESPACE, DRAW_SCHEMA_LOCATION)})
boxElement = etree.SubElement(featureMembersElement, nspath_eval('gml:box', namespaces), attrib={ nspath_eval("gml:id",namespaces):"box.1" })
geomElement = etree.SubElement(boxElement, nspath_eval('gml:the_geom', namespaces))
multiPolygonElement = etree.SubElement(geomElement, nspath_eval('gml:MultiPolygon', namespaces),
attrib={"srsDimension":"2", "srsName":"http://www.opengis.net/gml/srs/epsg.xml#4326"} )
for polygon in self.polygons:
polygonMemberElement = etree.SubElement(multiPolygonElement, nspath_eval('gml:polygonMember', namespaces))
polygonElement = etree.SubElement(polygonMemberElement, nspath_eval('gml:Polygon', namespaces))
exteriorElement = etree.SubElement(polygonElement, nspath_eval('gml:exterior', namespaces))
linearRingElement = etree.SubElement(exteriorElement, nspath_eval('gml:LinearRing', namespaces))
posListElement = etree.SubElement(linearRingElement, nspath_eval('gml:posList', namespaces))
posListElement.text = ' '.join(["%s %s" % (x, y) for x, y in polygon[:] ])
idElement = etree.SubElement(boxElement, nspath_eval('gml:ID', namespaces))
idElement.text = "0"
return dataElement
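# Hedged usage sketch (not part of the original module): a FeatureCollection built
# from inline polygons, as in the initializer docstring above. Coordinates are the
# illustrative values from that docstring.
def _example_gml_multipolygon():
    polygons = [[(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418),
                 (-101.2363, 39.5273), (-102.8184, 39.5273)]]
    featureCollection = GMLMultiPolygonFeatureCollection(polygons)
    # returns a <wps:Data> element ready to be appended to a <wps:Input>
    return featureCollection.getXml()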
def monitorExecution(execution, sleepSecs=3, download=False, filepath=None):
'''
Convenience method to monitor the status of a WPS execution until it completes (successfully or not),
and write the output to a file after successful job completion.
execution: WPSExecution instance
sleepSecs: number of seconds to sleep between check status invocations
download: True to download the output when the process terminates, False otherwise
filepath: optional path to the output file (if download=True), otherwise the filepath will be inferred from the response document
'''
while execution.isComplete()==False:
execution.checkStatus(sleepSecs=sleepSecs)
log.info('Execution status: %s' % execution.status)
if execution.isSucceded():
if download:
execution.getOutput(filepath=filepath)
else:
for output in execution.processOutputs:
if output.reference is not None:
log.info('Output URL=%s' % output.reference)
else:
for ex in execution.errors:
log.error('Error: code=%s, locator=%s, text=%s' % (ex.code, ex.locator, ex.text))
def printValue(value):
'''
Utility method to format a value for printing.
'''
# ComplexData type
if isinstance(value, ComplexData):
return "mimeType=%s, encoding=%s, schema=%s" % (value.mimeType, value.encoding, value.schema)
# other type
else:
return value
def printInputOutput(value, indent=''):
'''
Utility method to inspect an input/output element.
'''
# InputOutput fields
print('%s identifier=%s, title=%s, abstract=%s, data type=%s' % (indent, value.identifier, value.title, value.abstract, value.dataType))
for val in value.allowedValues:
print('%s Allowed Value: %s' % (indent, printValue(val)))
if value.anyValue:
print('%s Any value allowed' % indent)
for val in value.supportedValues:
print('%s Supported Value: %s' % (indent, printValue(val)))
print('%s Default Value: %s ' % (indent, printValue(value.defaultValue)))
# Input fields
if isinstance(value, Input):
print('%s minOccurs=%d, maxOccurs=%d' % (indent, value.minOccurs, value.maxOccurs))
# Output fields
if isinstance(value, Output):
print('%s reference=%s, mimeType=%s' % (indent, value.reference, value.mimeType))
for datum in value.data:
print('%s Data Value: %s' % (indent, printValue(datum)))
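# Hedged end-to-end sketch (not part of the original module): build, submit and
# monitor a WPS Execute request using the classes above. The service URL, process
# identifier and input/output names are hypothetical placeholders.
def _example_execute_and_monitor():
    execution = WPSExecution(url='http://example.com/wps')
    request = execution.buildRequest(
        'example.process.Identifier',
        inputs=[('DATASET_URI', 'http://example.com/data.nc')],
        output='OUTPUT')
    # POST the serialized request and parse the first response to get the status
    response = execution.submitRequest(etree.tostring(request))
    execution.parseResponse(response)
    # poll until the job completes, then download the referenced output
    monitorExecution(execution, sleepSecs=3, download=True, filepath='wps.out')
    return execution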
|
|
''' A handler for loading sequenced assets from binaries & turtle metadata. '''
import logging
import os
import re
import sys
import yaml
from rdflib import Graph, Literal, Namespace, URIRef
from classes import pcdm
from classes.exceptions import ConfigException, DataReadException
from namespaces import bibo, dc, dcmitype, dcterms, fabio, pcdmuse, rdf
#============================================================================
# DATA LOADING FUNCTION
#============================================================================
def load(repo, batch_config):
return Batch(repo, batch_config)
#============================================================================
# BATCH CLASS (FOR PAGED BINARIES PLUS RDF METADATA)
#============================================================================
class Batch():
'''Iterator class representing a set of resources to be loaded'''
def __init__(self, repo, config):
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
# Check for required configuration items and set up paths
required_keys = ['HANDLER',
'COLLECTION',
'ROOT',
'MAPFILE',
'LOG_LOCATION',
'LOG_CONFIG',
'METADATA_FILE',
'DATA_PATH'
]
for key in required_keys:
if not config.get(key):
raise ConfigException(
'Missing required key {0} in batch config'.format(key)
)
# Set configuration Properties
self.local_path = os.path.normpath(config.get('ROOT'))
self.data_path = os.path.join(self.local_path,
config['DATA_PATH'])
self.metadata_file = os.path.join(self.local_path,
config.get('METADATA_FILE'))
self.collection = pcdm.Collection.from_repository(repo,
config.get('COLLECTION'))
# Create data structures to accumulate process results
self.incomplete = []
self.extra_files = []
# Check for required metadata file
if not os.path.isfile(self.metadata_file):
raise ConfigException('Specified metadata file could not be found')
# check for an existing file index
file_index = os.path.join(self.local_path, 'file_index.yml')
if os.path.isfile(file_index):
self.logger.info('Found file index in {0}'.format(file_index))
with open(file_index, 'r') as index:
self.all_files = yaml.safe_load(index)
else:
# Generate index of all files in the data path
# maps the basename to a full path
self.logger.info(
'Walking the {0} tree to create a file index'.format(self.data_path))
self.all_files = {}
file_count = 0
for root, dirs, files in os.walk(self.data_path):
for f in files:
file_count += 1
if f not in self.all_files:
self.all_files[f] = [os.path.join(root, f)]
else:
self.all_files[f].append(os.path.join(root, f))
self.logger.info("Found {0} files with {1} unique filenames"
.format(file_count, len(self.all_files)))
# save index to file
with open(file_index, 'w') as index:
yaml.dump(self.all_files, index, default_flow_style=False)
with open(self.metadata_file, 'r') as f:
self.logger.info(
'Parsing the master metadata graph in {0}'.format(self.metadata_file))
self.master_graph = Graph().parse(f, format="turtle")
self.subjects = sorted(set(self.master_graph.subjects()))
self.length = len(self.subjects)
self.count = 0
self.logger.info("Batch contains {0} items.".format(self.length))
def __iter__(self):
return self
def __next__(self):
if self.count < self.length:
subject = self.subjects[self.count]
item_graph = Graph()
for triple in self.master_graph.triples((subject, None, None)):
item_graph.add(triple)
item = Item.from_graph(item_graph, self.all_files)
item.path = str(subject)
item.add_collection(self.collection)
self.count += 1
return item
else:
self.logger.info('Processing complete!')
raise StopIteration()
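# Hedged usage sketch (not part of the original handler): iterating a Batch and
# reading the data for each item. The repository object and batch configuration are
# assumed to be supplied by the calling framework, with the required keys listed above.
def _example_iterate_batch(repo, batch_config):
    batch = load(repo, batch_config)
    for item in batch:
        # attach binaries referenced by dcterms:hasPart as files and pages
        item.read_data()
        yield item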
#============================================================================
# ITEM CLASS
#============================================================================
class Item(pcdm.Item):
'''Class representing a paged repository item resource'''
@classmethod
def from_graph(cls, graph, all_files):
item = cls(title=next(graph.objects(predicate=dcterms.title)))
item.src_graph = graph
item.all_files = all_files
return item
def __init__(self, title=None, files=None, parts=None):
super(Item, self).__init__()
self.src_graph = None
self.title = title
self.filepaths = files
self.parts = parts
self.sequence_attr = ('Page', 'id')
def read_data(self):
files = []
parts = {}
if self.src_graph is not None:
# Parse each filename in hasPart and allocate to correct location in item entry
for part in self.src_graph.objects(predicate=dcterms.hasPart):
filename = str(part)
# ensure exactly one path that is mapped from the basename
if not filename in self.all_files:
raise DataReadException('File {0} not found'.format(filename))
elif len(self.all_files[filename]) > 1:
raise DataReadException('Filename {0} is not unique'.format(filename))
file_path = self.all_files[filename][0]
normalized = filename.replace('_', '-')
basename, ext = os.path.splitext(normalized)
base_parts = basename.split('-')
# handle files with no sequence id
if len(base_parts) == 2:
files.append(file_path)
# handle files with a sequence id
elif len(base_parts) == 3:
page_no = str(int(base_parts[2]))
if page_no not in parts:
parts[page_no] = [ file_path ]
else:
parts[page_no].append(file_path)
else:
logging.getLogger(__name__).warning(
'Filename {0} does not match a known pattern'.format(filename))
# remove the dcterms:hasPart triples
self.src_graph.remove((None, dcterms.hasPart, part))
for path in files:
self.add_file(File.from_localpath(path))
# renumber the parts from 1
for (n, key) in enumerate(sorted(parts.keys(), key=int), 1):
self.add_component(Page(n, parts[key], self))
def graph(self):
graph = super(Item, self).graph()
if self.src_graph is not None:
for (s, p, o) in self.src_graph:
graph.add((self.uri, p, o))
return graph
#============================================================================
# PAGE (COMPONENT) CLASS
#============================================================================
class Page(pcdm.Component):
'''Class representing one page of an item-level resource'''
def __init__(self, id, files, item):
super().__init__()
self.id = str(id)
self.title = "{0}, Page {1}".format(item.title, self.id)
self.ordered = True
for f in files:
self.add_file(File.from_localpath(f))
def graph(self):
graph = super(Page, self).graph()
graph.add((self.uri, dcterms.title, Literal(self.title)))
graph.add((self.uri, rdf.type, fabio.Page))
graph.add((self.uri, fabio.hasSequenceIdentifier, Literal(self.id)))
return graph
#============================================================================
# FILE CLASS
#============================================================================
class File(pcdm.File):
'''Class representing file associated with an item or page resource'''
def graph(self):
graph = super(File, self).graph()
graph.add((self.uri, dcterms.title, Literal(self.title)))
graph.add((self.uri, dcterms.type, dcmitype.Text))
if self.filename.endswith('.tif'):
graph.add((self.uri, rdf.type, pcdmuse.PreservationMasterFile))
elif self.filename.endswith('.jpg'):
graph.add((self.uri, rdf.type, pcdmuse.IntermediateFile))
elif self.filename.endswith('.xml'):
graph.add((self.uri, rdf.type, pcdmuse.ExtractedText))
elif self.filename.endswith('.txt'):
graph.add((self.uri, rdf.type, pcdmuse.ExtractedText))
return graph
|
|
# Original project at https://github.com/psychomario/pyinject
# The project is licensed under the terms of the MIT license; see
# accompanying LICENSE.md for details.
import ctypes
import ctypes.wintypes as wintypes
import platform
import binascii
import os
wintypes.LPTSTR = ctypes.POINTER(ctypes.c_char)
wintypes.LPBYTE = ctypes.POINTER(ctypes.c_ubyte)
wintypes.HANDLE = ctypes.c_void_p
wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
wintypes.LPCTSTR = ctypes.POINTER(ctypes.c_char)
wintypes.PHANDLE = ctypes.POINTER(wintypes.HANDLE)
class __LUID(ctypes.Structure):
"""see:
http://msdn.microsoft.com/en-us/library/windows/desktop/aa379261(v=vs.85).aspx
"""
_fields_ = [("LowPart", wintypes.DWORD),
("HighPart", wintypes.LONG),]
wintypes.LUID = __LUID
wintypes.PLUID = wintypes.POINTER(wintypes.LUID)
class __LUID_AND_ATTRIBUTES(ctypes.Structure):
"""see:
http://msdn.microsoft.com/en-us/library/windows/desktop/aa379263(v=vs.85).aspx
"""
_fields_ = [("Luid", wintypes.LUID),
("Attributes", wintypes.DWORD),]
wintypes.LUID_AND_ATTRIBUTES = __LUID_AND_ATTRIBUTES
wintypes.PLUID_AND_ATTRIBUTES = wintypes.POINTER(wintypes.LUID_AND_ATTRIBUTES)
class __TOKEN_PRIVILEGES(ctypes.Structure):
"""see:
http://msdn.microsoft.com/en-us/library/windows/desktop/aa379630(v=vs.85).aspx
"""
_fields_ = [("PrivilegeCount", wintypes.DWORD),
("Privileges", wintypes.LUID_AND_ATTRIBUTES),]
wintypes.TOKEN_PRIVILEGES = __TOKEN_PRIVILEGES
wintypes.PTOKEN_PRIVILEGES = wintypes.POINTER(wintypes.TOKEN_PRIVILEGES)
class __STARTUPINFO(ctypes.Structure):
"""see:
http://msdn.microsoft.com/en-us/library/windows/desktop/ms686331(v=vs.85).aspx
"""
_fields_ = [("cb", wintypes.DWORD),
("lpReserved", wintypes.LPTSTR),
("lpDesktop", wintypes.LPTSTR),
("lpTitle", wintypes.LPTSTR),
("dwX", wintypes.DWORD),
("dwY", wintypes.DWORD),
("dwXSize", wintypes.DWORD),
("dwYSize", wintypes.DWORD),
("dwXCountChars", wintypes.DWORD),
("dwYCountChars", wintypes.DWORD),
("dwFillAttribute",wintypes.DWORD),
("dwFlags", wintypes.DWORD),
("wShowWindow", wintypes.WORD),
("cbReserved2", wintypes.WORD),
("lpReserved2", wintypes.LPBYTE),
("hStdInput", wintypes.HANDLE),
("hStdOutput", wintypes.HANDLE),
("hStdError", wintypes.HANDLE),]
wintypes.STARTUPINFO = __STARTUPINFO
wintypes.LPSTARTUPINFO = wintypes.POINTER(wintypes.STARTUPINFO)
class __PROCESS_INFORMATION(ctypes.Structure):
"""see:
http://msdn.microsoft.com/en-us/library/windows/desktop/ms684873(v=vs.85).aspx
"""
_fields_ = [("hProcess", wintypes.HANDLE),
("hThread", wintypes.HANDLE),
("dwProcessId", wintypes.DWORD),
("dwThreadId", wintypes.DWORD),]
wintypes.PROCESS_INFORMATION = __PROCESS_INFORMATION
wintypes.LPPROCESS_INFORMATION = wintypes.POINTER(wintypes.PROCESS_INFORMATION)
class __SYSTEM_MODULE_INFORMATION(ctypes.Structure):
_fields_ = [("ModuleCount", wintypes.ULONG),
("WhoCares", ctypes.c_void_p * 2),
("BaseAddress", ctypes.c_void_p),
("Size", wintypes.ULONG),
("MoarStuff", wintypes.ULONG),
("MoarMoar", wintypes.USHORT),
("HeyThere", wintypes.USHORT),
("Pwned", wintypes.USHORT),
("W00t", wintypes.USHORT),
("ImageName", ctypes.c_char * 256),]
wintypes.SYSTEM_MODULE_INFORMATION = __SYSTEM_MODULE_INFORMATION
wintypes.PSYSTEM_MODULE_INFORMATION = wintypes.POINTER(wintypes.SYSTEM_MODULE_INFORMATION)
class __IMAGE_DOS_HEADER(ctypes.Structure):
_fields_ = [("e_magic", wintypes.WORD),
("e_cblp", wintypes.WORD),
("e_cp", wintypes.WORD),
("e_crlc", wintypes.WORD),
("e_cparhdr", wintypes.WORD),
("e_minalloc", wintypes.WORD),
("e_maxalloc", wintypes.WORD),
("e_ss", wintypes.WORD),
("e_sp", wintypes.WORD),
("e_csum", wintypes.WORD),
("e_ip", wintypes.WORD),
("e_cs", wintypes.WORD),
("e_lfarlc", wintypes.WORD),
("e_ovno", wintypes.WORD),
("e_res", wintypes.WORD * 4),
("e_oemid", wintypes.WORD),
("e_oeminfo", wintypes.WORD),
("e_res2", wintypes.WORD * 10),
("e_lfanew", wintypes.LONG),]
wintypes.IMAGE_DOS_HEADER = __IMAGE_DOS_HEADER
wintypes.PIMAGES_DOS_HEADER = wintypes.POINTER(wintypes.IMAGE_DOS_HEADER)
class __IMAGE_FILE_HEADER(ctypes.Structure):
_fields_ = [("Machine", wintypes.WORD),
("NumberOfSections", wintypes.WORD),
("TimeDateStamp", wintypes.DWORD),
("PointerToSymbolTable", wintypes.DWORD),
("NumberOfSymbols", wintypes.DWORD),
("SizeOfOptionalHeader", wintypes.WORD),
("Characteristics", wintypes.WORD),]
wintypes.IMAGE_FILE_HEADER = __IMAGE_FILE_HEADER
wintypes.PIMAGE_FILE_HEADER = wintypes.POINTER(wintypes.IMAGE_FILE_HEADER)
class __IMAGE_DATA_DIRECTORY(ctypes.Structure):
_fields_ = [("VirtualAddress", wintypes.DWORD),
("Size", wintypes.DWORD),]
wintypes.IMAGE_DATA_DIRECTORY = __IMAGE_DATA_DIRECTORY
wintypes.PIMAGE_DATA_DIRECTORY = wintypes.POINTER(wintypes.IMAGE_DATA_DIRECTORY)
class __IMAGE_OPTIONAL_HEADER(ctypes.Structure):
_fields_ = [("Magic", wintypes.WORD),
("MajorLinkerVersion", wintypes.BYTE),
("MinorLinkerVersion", wintypes.BYTE),
("SizeOfCode", wintypes.DWORD),
("SizeOfInitializedData", wintypes.DWORD),
("SizeOfUninitializedData", wintypes.DWORD),
("AddressOfEntryPoint", wintypes.DWORD),
("BaseOfCode", wintypes.DWORD),
("BaseOfData", wintypes.DWORD),
("ImageBase", wintypes.DWORD),
("SectionAlignment", wintypes.DWORD),
("FileAlignment", wintypes.DWORD),
("MajorOperatingSystemVersion", wintypes.WORD),
("MinorOperatingSystemVersion", wintypes.WORD),
("MajorImageVersion", wintypes.WORD),
("MinorImageVersion", wintypes.WORD),
("MajorSubsystemVersion", wintypes.WORD),
("MinorSubsystemVersion", wintypes.WORD),
("Win32VersionValue", wintypes.DWORD),
("SizeOfImage", wintypes.DWORD),
("SizeOfHeaders", wintypes.DWORD),
("CheckSum", wintypes.DWORD),
("Subsystem", wintypes.WORD),
("DllCharacteristics", wintypes.WORD),
("SizeOfStackReserve", wintypes.DWORD),
("SizeOfStackCommit", wintypes.DWORD),
("SizeOfHeapReserve", wintypes.DWORD),
("SizeOfHeapCommit", wintypes.DWORD),
("LoaderFlags", wintypes.DWORD),
("NumberOfRvaAndSizes", wintypes.DWORD),
("DataDirectory", wintypes.IMAGE_DATA_DIRECTORY * 16),]
wintypes.IMAGE_OPTIONAL_HEADER = __IMAGE_OPTIONAL_HEADER
wintypes.PIMAGE_OPTIONAL_HEADER = wintypes.POINTER(wintypes.IMAGE_OPTIONAL_HEADER)
class __IMAGE_NT_HEADER(ctypes.Structure):
_fields_ = [("Signature", wintypes.DWORD),
("FileHeader", wintypes.IMAGE_FILE_HEADER),
("OptionalHeader", wintypes.IMAGE_OPTIONAL_HEADER),]
wintypes.IMAGE_NT_HEADER = __IMAGE_NT_HEADER
wintypes.PIMAGE_NT_HEADER = wintypes.POINTER(wintypes.IMAGE_NT_HEADER)
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [("nLength", wintypes.DWORD),
("lpSecurityDescriptor", wintypes.LPVOID),
("bInheritHandle", wintypes.BOOL)]
LPSECURITY_ATTRIBUTES = wintypes.POINTER(SECURITY_ATTRIBUTES)
wintypes.LPTHREAD_START_ROUTINE = wintypes.LPVOID
class Process():
"""This class can be used for dll or shellcode injection.
Process(pid=pid)
This will attach to process with pid=pid assuming
you have proper privileges
Process(pe=path)
Starts the executable at path
self.inject(dllpath)
Injects dll at dllpath
self.injectshellcode(shellcode)
Injects raw shellcode in the string shellcode
self.terminate(code)
This will terminate the process in use regardless of where it was
started from. code is the exit code"""
def __init__(self, pid=None, pe=None, handle=None):
self.kernel32 = ctypes.windll.kernel32
self.PROCESS_ALL_ACCESS = (0x000F0000L|0x00100000L|0xFFF)
self.SE_DEBUG_NAME = "SeDebugPrivilege"
self.TOKEN_ADJUST_PRIVILEGES = 0x20
self.SE_PRIVILEGE_ENABLED = 0x00000002
self.request_debug_privileges()
if pid: #attach to current file
self.kernel32.OpenProcess.restype = wintypes.HANDLE
self.kernel32.OpenProcess.argtypes = [ wintypes.DWORD,
wintypes.BOOL,
wintypes.DWORD ]
result = self.handle = self.kernel32.OpenProcess(
self.PROCESS_ALL_ACCESS,
False,
pid
)
self.get_last_error("OpenProcess", result)
self.pid = pid
elif pe: #create new process
startupinfo = wintypes.STARTUPINFO()
process_information = wintypes.PROCESS_INFORMATION()
startupinfo.dwFlags = 0x1
startupinfo.wShowWindow = 0x0
startupinfo.cb = ctypes.sizeof(startupinfo)
self.kernel32.CreateProcessA.restype = wintypes.BOOL
self.kernel32.CreateProcessA.argtypes = [ wintypes.LPCSTR,
wintypes.LPTSTR,
LPSECURITY_ATTRIBUTES,
LPSECURITY_ATTRIBUTES,
wintypes.BOOL,
wintypes.DWORD,
wintypes.LPVOID,
wintypes.LPCTSTR,
wintypes.LPSTARTUPINFO,
wintypes.LPPROCESS_INFORMATION ]
result = self.kernel32.CreateProcessA(
pe,
None,
None,
None,
True,
0,
None,
None,
ctypes.byref(startupinfo),
ctypes.byref(process_information)
)
self.get_last_error("CreateProcessA", result)
if result == 0 :
print "CreateProcessA Failed!"
return None
self.handle = process_information.hProcess
self.pid = process_information.dwProcessId
elif handle:
self.handle = handle
self.pid = None
else:
return None
self.arch = platform.architecture()[0][:2]
if self.arch == "32":
self.addrlen = 4
else:
self.addrlen = 8
def get_last_error(self, desc, val):
return # Comment out the return to see return and error values
print "%s=0x%x, GetCurrentError=0x%x (%d)" % (desc, val, self.kernel32.GetLastError(), self.kernel32.GetLastError())
def request_debug_privileges(self):
"""Adds SeDebugPrivilege to current process for various needs"""
privs = wintypes.LUID()
ctypes.windll.advapi32.LookupPrivilegeValueA.restype = wintypes.BOOL
ctypes.windll.advapi32.LookupPrivilegeValueA.argtypes = [ wintypes.LPCTSTR,
wintypes.LPCTSTR,
wintypes.PLUID ]
result = ctypes.windll.advapi32.LookupPrivilegeValueA(
None,
self.SE_DEBUG_NAME,
ctypes.byref(privs)
)
self.get_last_error("LookupPrivilegeValueA",result)
token = wintypes.TOKEN_PRIVILEGES(
1,
wintypes.LUID_AND_ATTRIBUTES(
privs,
self.SE_PRIVILEGE_ENABLED
)
)
hToken = wintypes.HANDLE()
ctypes.windll.advapi32.OpenProcessToken.restype = wintypes.BOOL
ctypes.windll.advapi32.OpenProcessToken.argtypes = [ wintypes.HANDLE,
wintypes.DWORD,
wintypes.PHANDLE ]
result = ctypes.windll.advapi32.OpenProcessToken(
wintypes.HANDLE(self.kernel32.GetCurrentProcess()),
self.TOKEN_ADJUST_PRIVILEGES,
ctypes.byref(hToken)
)
self.get_last_error("OpenProcessToken",result)
ctypes.windll.advapi32.AdjustTokenPrivileges.restype = wintypes.BOOL
ctypes.windll.advapi32.AdjustTokenPrivileges.argtypes = [ wintypes.HANDLE,
wintypes.BOOL,
wintypes.PTOKEN_PRIVILEGES,
wintypes.DWORD,
wintypes.PTOKEN_PRIVILEGES,
wintypes.LPDWORD ]
result = ctypes.windll.advapi32.AdjustTokenPrivileges(
hToken,
False,
ctypes.byref(token),
0x0,
None,
None
)
self.get_last_error("AdjustTokenPrivileges",result)
ctypes.windll.kernel32.CloseHandle.restype = wintypes.BOOL
ctypes.windll.kernel32.CloseHandle.argtypes = [ wintypes.HANDLE ]
result = ctypes.windll.kernel32.CloseHandle(hToken)
self.get_last_error("CloseHandle", result)
def inject(self,dllpath):
"""This function injects dlls the smart way
specifying stack rather than pushing and calling"""
dllpath = os.path.abspath(dllpath)
self.kernel32.GetModuleHandleA.restype = wintypes.HANDLE
self.kernel32.GetModuleHandleA.argtypes = [ wintypes.LPCTSTR ]
ModuleHandle = self.kernel32.GetModuleHandleA("kernel32.dll")
self.get_last_error("GetModuleHandle",ModuleHandle)
self.kernel32.GetProcAddress.restype = wintypes.LPVOID
self.kernel32.GetProcAddress.argtypes = [ wintypes.HANDLE, wintypes.LPCSTR ]
LoadLibraryA = self.kernel32.GetProcAddress(
wintypes.HANDLE(ModuleHandle),
"LoadLibraryA")
self.get_last_error("GetProcAddress", LoadLibraryA)
self.kernel32.VirtualAllocEx.restype = wintypes.LPVOID
self.kernel32.VirtualAllocEx.argtypes = [ wintypes.HANDLE,
wintypes.LPVOID,
ctypes.c_size_t,
wintypes.DWORD,
wintypes.DWORD ]
RemotePage = self.kernel32.VirtualAllocEx(
self.handle,
None,
len(dllpath)+1,
0x1000, # MEM_COMMIT
0x40 # PAGE_EXECUTE_READWRITE
)
self.get_last_error("VirtualAllocEx", RemotePage)
self.kernel32.WriteProcessMemory.restype = wintypes.BOOL
self.kernel32.WriteProcessMemory.argtypes = [ wintypes.HANDLE,
wintypes.LPVOID,
wintypes.LPCVOID,
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_size_t) ]
result = self.kernel32.WriteProcessMemory(
self.handle,
RemotePage,
dllpath,
len(dllpath),
None
)
self.get_last_error("WriteProcessMemory",result)
self.kernel32.CreateRemoteThread.restype = wintypes.HANDLE
self.kernel32.CreateRemoteThread.argtypes = [ wintypes.HANDLE,
LPSECURITY_ATTRIBUTES,
ctypes.c_size_t,
wintypes.LPTHREAD_START_ROUTINE,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.LPVOID ]
RemoteThread = self.kernel32.CreateRemoteThread(
self.handle,
None,
0,
LoadLibraryA,
RemotePage,
0,
None
)
self.get_last_error("CreateRemoteThread",RemoteThread)
self.kernel32.WaitForSingleObject.restype = wintypes.DWORD
self.kernel32.WaitForSingleObject.argtypes = [ wintypes.HANDLE, wintypes.DWORD ]
# Wait 10 seconds then barrel on...
result = self.kernel32.WaitForSingleObject(
RemoteThread,
10*1000 # 10 seconds. -1 for infinite
)
self.get_last_error("WaitForSingleObject",result)
exitcode = wintypes.DWORD(0)
self.kernel32.GetExitCodeThread.restype = wintypes.BOOL
self.kernel32.GetExitCodeThread.argtypes = [ wintypes.HANDLE, wintypes.LPDWORD ]
result = self.kernel32.GetExitCodeThread(
RemoteThread,
ctypes.byref(exitcode)
)
self.get_last_error("GetExitCodeThread",result)
# print "exitcode = %s" % str(exitcode)
self.kernel32.VirtualFreeEx.restype = wintypes.BOOL
self.kernel32.VirtualFreeEx.argtypes = [ wintypes.HANDLE,
wintypes.LPVOID,
ctypes.c_size_t,
wintypes.DWORD ]
result = self.kernel32.VirtualFreeEx(
self.handle,
RemotePage,
0, # Size. Must be 0 for MEM_RELEASE
0x8000 # MEM_RELEASE
)
self.get_last_error("VirtualFreeEx",result)
return exitcode.value
def injectshellcode(self, shellcode):
"""This function merely executes what it is given"""
self.kernel32.VirtualAllocEx.restype = wintypes.LPVOID
self.kernel32.VirtualAllocEx.argtypes = [ wintypes.HANDLE,
wintypes.LPVOID,
ctypes.c_size_t,
wintypes.DWORD,
wintypes.DWORD ]
shellcodeaddress = self.kernel32.VirtualAllocEx(
self.handle,
None,
len(shellcode),
0x1000, # MEM_COMMIT
0x40 # PAGE_EXECUTE_READWRITE
)
self.get_last_error("VirtualAllocEx", shellcodeaddress)
self.kernel32.WriteProcessMemory.restype = wintypes.BOOL
self.kernel32.WriteProcessMemory.argtypes = [ wintypes.HANDLE,
wintypes.LPVOID,
wintypes.LPCVOID,
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_size_t) ]
result = self.kernel32.WriteProcessMemory(
self.handle,
shellcodeaddress,
shellcode,
len(shellcode),
None
)
self.get_last_error("WriteProcessMemory", result)
self.kernel32.CreateRemoteThread.restype = wintypes.HANDLE
self.kernel32.CreateRemoteThread.argtypes = [ wintypes.HANDLE,
LPSECURITY_ATTRIBUTES,
ctypes.c_size_t,
wintypes.LPTHREAD_START_ROUTINE,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.LPVOID ]
thread = self.kernel32.CreateRemoteThread(
self.handle,
None,
0,
shellcodeaddress,
None,
0,
None
)
self.get_last_error("CreateRemoteThread", thread);
def injectshellcodefromfile(self, file, bzipd=False):
"""This function merely executes what it is given as a raw file"""
fh=open(file,'rb')
shellcode=fh.read()
fh.close()
if bzipd:
import bz2
shellcode=bz2.decompress(shellcode)
self.injectshellcode(shellcode)
def terminate(self, code=0):
"""This function terminates the process from the current handle"""
self.kernel32.TerminateProcess.restype = wintypes.BOOL
self.kernel32.TerminateProcess.argtypes = [wintypes.HANDLE, wintypes.UINT]
result = self.kernel32.TerminateProcess(
self.handle,
code
)
self.get_last_error("TerminateProcess",result)
self.kernel32.CloseHandle(self.handle)
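# Illustrative usage sketch (added for clarity; the class and constructor names
# are assumptions -- the enclosing class is defined earlier in this file and is
# assumed to hold an open process handle on `self.handle`):
#
#   proc = <InjectorClass>(pid_or_handle)           # hypothetical constructor
#   exitcode = proc.inject(r"C:\temp\payload.dll")  # runs LoadLibraryA in the target
#   proc.injectshellcodefromfile("payload.bin")     # runs raw shellcode in the target
#   proc.terminate()                                # TerminateProcess + CloseHandle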
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from froide.helper.auth_migration_util import USER_DB_NAME
APP_MODEL, APP_MODEL_NAME = 'account.User', 'account.user'
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FoiRequest.summary'
db.add_column(u'foirequest_foirequest', 'summary',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FoiRequest.summary'
db.delete_column(u'foirequest_foirequest', 'summary')
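    # Usage note (added for clarity): with South installed, this schema
    # migration is applied with `./manage.py migrate foirequest` and rolled
    # back by migrating the app to the previous migration number.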
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
APP_MODEL_NAME: {
'Meta': {'object_name': 'User', 'db_table': "'%s'" % USER_DB_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'foirequest.deferredmessage': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'DeferredMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mail': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'foirequest.foiattachment': {
'Meta': {'ordering': "('name',)", 'object_name': 'FoiAttachment'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'belongs_to': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiMessage']", 'null': 'True'}),
'can_approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'converted': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'original_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['foirequest.FoiAttachment']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_converted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_redacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'redacted': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unredacted_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['foirequest.FoiAttachment']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'foirequest.foievent': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'FoiEvent'},
'context_json': ('django.db.models.fields.TextField', [], {}),
'event_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBody']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'foirequest.foimessage': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'FoiMessage'},
'content_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_escalation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_postal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_response': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'not_publishable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'plaintext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'plaintext_redacted': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_public_body': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'received_messages'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['publicbody.PublicBody']"}),
'redacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
'sender_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_public_body': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'send_messages'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['publicbody.PublicBody']"}),
'sender_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'subject_redacted': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
u'foirequest.foirequest': {
'Meta': {'ordering': "('last_message',)", 'object_name': 'FoiRequest'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'costs': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_foi': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.Jurisdiction']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'last_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.FoiLaw']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBody']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'refusal_reason': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resolved_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'same_as': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'same_as_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'secret_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'visibility': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
u'foirequest.publicbodysuggestion': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'PublicBodySuggestion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBody']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL'})
},
u'foirequest.taggedfoirequest': {
'Meta': {'object_name': 'TaggedFoiRequest'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'foirequest_taggedfoirequest_items'", 'to': u"orm['taggit.Tag']"})
},
u'publicbody.foilaw': {
'Meta': {'object_name': 'FoiLaw'},
'combined': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['publicbody.FoiLaw']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.Jurisdiction']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'letter_end': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'letter_start': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'max_response_time': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True', 'blank': 'True'}),
'max_response_time_unit': ('django.db.models.fields.CharField', [], {'default': "'day'", 'max_length': '32', 'blank': 'True'}),
'mediator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mediating_laws'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['publicbody.PublicBody']", 'blank': 'True', 'null': 'True'}),
'meta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '3'}),
'refusal_reasons': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'request_note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'publicbody.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rank': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'publicbody.publicbody': {
'Meta': {'ordering': "('name',)", 'object_name': 'PublicBody'},
'_created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'public_body_creators'", 'on_delete': 'models.SET_NULL', 'default': '1', 'to': u"orm['%s']" % APP_MODEL, 'blank': 'True', 'null': 'True'}),
'_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'public_body_updaters'", 'on_delete': 'models.SET_NULL', 'default': '1', 'to': u"orm['%s']" % APP_MODEL, 'blank': 'True', 'null': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'classification_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.Jurisdiction']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'laws': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['publicbody.FoiLaw']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number_of_requests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'other_names': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['publicbody.PublicBody']", 'blank': 'True', 'null': 'True'}),
'request_note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'descendants'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['publicbody.PublicBody']", 'blank': 'True', 'null': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBodyTopic']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'website_dump': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'publicbody.publicbodytopic': {
'Meta': {'object_name': 'PublicBodyTopic'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['foirequest']
|
|
# Copyright 2009-2010 by Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sends subject updates to users per their subscriptions.
Accesses the Account data structure to retrieve subject updates for each user,
then sends out information to each user per their subscription settings.
format_email_subject(subdomain, frequency): generates an e-mail subject line
get_timedelta(account, subject): returns a text frequency as a timedelta
fetch_updates(): returns a dictionary of updated values for the given subject
format_update(update, locale): translates and formats a particular update
send_email(): sends an e-mail with the supplied information
update_account_alert_time(): updates an account's next_%freq%_alert time
EmailFormatter: base class to create formatted text for e-mail updates
HospitalEmailFormatter(EmailFormatter): extension for formatting specific to
hospital e-mail updates
MailAlerts(utils.Handler): handler class to send e-mail updates
"""
# TODO(kpy): Add an end-to-end test for the subscription system as a whole.
__author__ = 'pfritzsche@google.com (Phil Fritzsche)'
import datetime
import logging
import os
from copy import deepcopy
from operator import itemgetter
from google.appengine.api import mail
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.runtime import DeadlineExceededError
from google.appengine.runtime.apiproxy_errors import OverQuotaError
import cache
import model
import utils
from feedlib.xml_utils import Struct
from model import Account, PendingAlert, Subject, Subscription
from utils import _, format, get_last_updated_time, order_and_format_updates
# Set up localization.
ROOT = os.path.dirname(__file__)
from django.conf import settings
try:
settings.configure()
except:
pass
settings.LANGUAGE_CODE = 'en'
settings.USE_I18N = True
settings.LOCALE_PATHS = (os.path.join(ROOT, 'locale'),)
import django.utils.translation
FREQUENCY_TO_TIMEDELTA = {
'instant': datetime.timedelta(0),
'daily': datetime.timedelta(1),
'weekly': datetime.timedelta(7)
}
def format_email_subject(subdomain, frequency):
"""Given a subdomain and frequency, formats an appropriate subject line for
an update e-mail."""
frequency = _(str(frequency.title()))
subject = '%s %s: %s' % (
subdomain.title(),
#i18n: subject of e-mail -> Resource Finder Update
utils.to_unicode(_('Resource Finder %s Update' % frequency)),
utils.to_local_isotime_day(datetime.datetime.now()))
return subject
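# Illustrative example (added for clarity): for subdomain 'haiti' and
# frequency 'daily', the subject built above reads roughly
# 'Haiti Resource Finder Daily Update: <local date>', where the date part
# comes from utils.to_local_isotime_day().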
def get_timedelta(frequency, now=None):
"""Given a text frequency, converts it to a timedelta."""
if frequency in FREQUENCY_TO_TIMEDELTA:
return FREQUENCY_TO_TIMEDELTA[frequency]
elif frequency == 'monthly':
if not now:
now = datetime.datetime.now()
next_month = datetime.datetime(now.year + (now.month / 12),
(now.month % 12) + 1, 1, now.hour,
now.minute, now.second, now.microsecond)
return next_month - now
return None
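# Illustrative example (added for clarity): for the 'monthly' frequency with
# now = datetime.datetime(2010, 12, 15), next_month above is
# datetime.datetime(2011, 1, 1, 0, 0), so the returned value is
# datetime.timedelta(17) -- the time remaining until the first of next month.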
def fetch_updates(alert, subject):
"""For a given alert and subject, finds any updated values.
Returns:
A list of dictionary mappings of attributes and their updated
values, including the attribute, the old/new values, and the
most recent author to update the value. Example:
[(attribute, {'old_value': value_foo,
'new_value': value_bar,
'author': author_foo})]
"""
if not (alert and subject):
return []
updated_attrs = []
old_values = alert.dynamic_properties()
for attribute in old_values:
value = subject.get_value(attribute)
author = subject.get_author_nickname(attribute)
alert_val = getattr(alert, attribute)
if value != alert_val:
updated_attrs.append({'attribute': attribute,
'old_value': alert_val,
'new_value': value,
'author': author})
return updated_attrs
def format_update(update, locale):
"""Insures that the attribute and old/new values of an update are translated
and properly formatted."""
update['attribute'] = utils.get_message('attribute_name',
update['attribute'],
locale)
update['new_value'] = format(update['new_value'], True)
update['old_value'] = format(update['old_value'], True)
return update
def send_email(locale, sender, to, subject, body, format):
"""Sends a single e-mail update.
Args:
locale: the locale whose language to use for the email
sender: the e-mail address of the person sending the e-mail
to: the user to send the update to
subject: the subject line of the e-mail
body: the text/html to use as the body of the e-mail
format: the form [text or html] the body is in
"""
django.utils.translation.activate(locale)
message = mail.EmailMessage()
message.sender = sender
message.to = to
message.subject = subject
if format == 'html':
message.html = body
else:
message.body = body
message.send()
def update_account_alert_time(account, frequency, now=None, initial=False):
"""Updates a particular account to send an alert at the appropriate
later date, depending on the given frequency.
    Args:
        account: the account whose time is to be updated
        frequency: used to determine how much to update by
        now: (optional) the reference time to compute from; defaults to
            datetime.datetime.now()
        initial: (optional) tells the function to check if this is the first
            time setting the account's update times"""
if not now:
now = datetime.datetime.now()
new_time = now + get_timedelta(frequency, now)
if (getattr(account, 'next_%s_alert' % frequency) == model.MAX_DATE or
not initial):
setattr(account, 'next_%s_alert' % frequency, new_time)
class EmailFormatter:
"""Base class to format update e-mails.
Attributes:
email_format: the preferred e-mail format for this account
locale: the account's locale
Methods:
__init__: constructor; requires the user's account
format_body: formats the body of an e-mail according to the account's
local and e-mail format preferences
format_plain_body: formats a generic plain text e-mail update
format_html_body: placeholder; override in subclass for HTML formatting
"""
def __init__(self, account):
self.email_format = account.email_format
self.locale = account.locale
def format_body(self, data):
if self.email_format == 'html':
return self.format_html_body(data)
else:
return self.format_plain_body(data)
def format_plain_body(self, data):
"""Forms the plain text body for an e-mail. Expects the data to be input
in the following format:
Struct(
date=datetime
changed_subjects={subject_key: (subject_title, (attribute,
{'old_value': value_foo,
'new_value': value_bar,
'author': author_foo}))})
"""
body = u''
for subject_name in data.changed_subjects:
(subject_title, updates) = data.changed_subjects[subject_name]
subject = Subject.get_by_key_name(subject_name)
subdomain = subject.get_subdomain()
subject_type = cache.SUBJECT_TYPES[subdomain][subject.type]
updates = order_and_format_updates(updates, subject_type,
self.locale, format_update)
body += 'UPDATE %s (%s)\n\n' % (subject_title, subject.get_name())
for update in updates:
body += '%s: %s\n-- %s: %s. %s: %s\n' % (
update['attribute'],
utils.to_unicode(format(update['new_value'], True)),
#i18n: old value for the attribute
utils.to_unicode(_('Previous value')),
utils.to_unicode(format(update['old_value'], True)),
#i18n: who the attribute was updated by
utils.to_unicode(_('Updated by')),
utils.to_unicode(update['author']))
body += '\n'
return body
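    # Illustrative output (added for clarity): for one changed subject with a
    # single updated attribute, the body built above looks roughly like:
    #
    #   UPDATE Example Hospital (example_hospital)
    #
    #   available_beds: 15
    #   -- Previous value: 10. Updated by: jsmith
    #
    # (titles, attribute names and authors here are placeholders)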
def format_html_body(self, data):
"""Placeholder function. Requires override by subclass [example in
HospitalEmailFormatter].
        Raises NotImplementedError if not overridden."""
raise NotImplementedError
class HospitalEmailFormatter(EmailFormatter):
"""Class to format update e-mails for hospital subject types.
Methods:
format_html_body: formats an HTML e-mail update
"""
def format_html_body(self, data):
"""Forms the HTML body for an e-mail. Expects the data to be input in
the same format as in format_plain_body(), with the optional addition of
an 'unchanged_subjects' field of the Struct. These will be displayed at
the bottom of the HTML e-mail for the purposes of digest information.
The 'unchanged_subjects' field, if present, should be a list of
subject names, i.e.:
[ subject_name1, subject_name2, subject_name3, ... ]
"""
changed_subjects = []
for subject_name in data.changed_subjects:
subject = Subject.get_by_key_name(subject_name)
subdomain, no_subdomain_name = subject_name.split(':')
subject_type = cache.SUBJECT_TYPES[subdomain][subject.type]
updates = order_and_format_updates(
data.changed_subjects[subject_name][1], subject_type,
self.locale, format_update)
changed_subjects.append({
'name': subject_name,
'no_subdomain_name': no_subdomain_name,
'title': format(subject.get_value('title')),
'address': format(subject.get_value('address')),
'contact_number': format(subject.get_value('phone')),
'contact_email': format(subject.get_value('email')),
'available_beds': format(subject.get_value('available_beds')),
'total_beds': format(subject.get_value('total_beds')),
'last_updated': format(get_last_updated_time(subject)),
'changed_vals': updates
})
changed_subjects = sorted(changed_subjects, key=itemgetter('title'))
unchanged_subjects = []
if 'unchanged_subjects' in data:
for subject in data.unchanged_subjects:
subject_name = subject.key().name()
no_subdomain_name = subject_name.split(':')[1]
unchanged_subjects.append({
'name': subject_name,
'no_subdomain_name': no_subdomain_name,
'title': subject.get_value('title'),
'address': format(subject.get_value('address')),
'contact_number': format(subject.get_value('phone')),
'contact_email': format(subject.get_value('email')),
'available_beds': format(
subject.get_value('available_beds')),
'total_beds': format(subject.get_value('total_beds')),
'last_updated': format(get_last_updated_time(subject))
})
unchanged_subjects = sorted(unchanged_subjects,
key=itemgetter('title'))
template_values = {
'nickname': data.nickname,
'domain': data.domain,
'subdomain': data.subdomain,
'changed_subjects': changed_subjects,
'unchanged_subjects': unchanged_subjects
}
path = os.path.join(os.path.dirname(__file__),
'templates/hospital_email.html')
return template.render(path, template_values)
EMAIL_FORMATTERS = {
'haiti': {
'hospital': HospitalEmailFormatter
},
'pakistan': {
'hospital': HospitalEmailFormatter
}
}
class MailAlerts(utils.Handler):
"""Handler for /mail_alerts. Used to handle e-mail update sending.
Attributes:
action: the specific action to be taken by the class
Methods:
init(): handles initialization tasks for the class
post(): responds to HTTP POST requests
update_and_add_pending_alerts(): queues up future digest alerts and
sends out instant updates; called when a subject is changed
send_digests(): sends out a digest update for the specified frequency
"""
def init(self):
"""Handles any useful initialization tasks for the class."""
self.appspot_email = 'updates@%s' % (
self.get_parent_domain().replace('appspot.com', 'appspotmail.com'))
self.action = self.request.get('action')
# Calls made from taskqueue don't have a 'Host' attribute in the headers
# dictionary. Domain must be parsed out from the full url.
self.domain = self.request.url[
:self.request.url.find(self.request.path)]
def post(self):
"""Responds to HTTP POST requests. This function either (queues up
future daily/weekly/monthly updates and sends instant updates) or
(sends out daily/weekly/monthly digest updates).
"""
self.init()
if self.action == 'subject_changed':
self.changed_request_data = utils.url_unpickle(
self.request.get('changed_data'))
self.unchanged_request_data = utils.url_unpickle(
self.request.get('unchanged_data'))
self.update_and_add_pending_alerts()
else:
try:
for subdomain in cache.SUBDOMAINS.keys():
for freq in ['daily', 'weekly', 'monthly']:
self.send_digests(freq, subdomain)
except DeadlineExceededError:
                # The cron job is automatically run every 5 minutes. We
                # expect that in some situations this will not finish within
                # the 30-second deadline. It is designed to simply pick up
                # where it left off on the next run, so we swallow this
                # exception to avoid having the system automatically restart
                # the request.
                # NOTE: this only applies to the digest system. If this script
                # is run because a facility is changed, we let the AppEngine
                # error management system kick in.
logging.info('mail_alerts.py: deadline exceeded error raised')
def update_and_add_pending_alerts(self):
"""Called when a subject is changed. It creates PendingAlerts for
any subscription for the changed subject. Also sends out alerts to
users who were subscribed to instant updates for this particular
subject.
"""
subject = Subject.get(self.subdomain, self.params.subject_name)
subject_key_name = self.subdomain + ':' + self.params.subject_name
subscriptions = Subscription.get_by_subject(subject_key_name)
for subscription in subscriptions:
if subscription.frequency != 'instant':
# queue pending alerts for non-instant update subscriptions
key_name = '%s:%s:%s' % (subscription.frequency,
subscription.user_email,
subscription.subject_name)
pa = PendingAlert.get_or_insert(
key_name, type=subject.type,
user_email=subscription.user_email,
subject_name=subscription.subject_name,
frequency=subscription.frequency)
if not pa.timestamp:
for update in self.changed_request_data:
setattr(pa, update['attribute'], update['old_value'])
for attribute in self.unchanged_request_data:
setattr(pa, attribute,
self.unchanged_request_data[attribute])
pa.timestamp = datetime.datetime.now()
db.put(pa)
else:
# send out alerts for those with instant update subscriptions
account = Account.all().filter('email =',
subscription.user_email).get()
email_data = Struct(
nickname=account.nickname or account.email,
domain=self.domain,
subdomain=self.subdomain,
changed_subjects={subject_key_name: (
subject.get_value('title'),
deepcopy(self.changed_request_data))}
)
email_formatter = EMAIL_FORMATTERS[
self.subdomain][subject.type](account)
body = email_formatter.format_body(email_data)
email_subject = format_email_subject(self.subdomain,
subscription.frequency)
send_email(account.locale,
'%s-%s' % (self.subdomain, self.appspot_email),
account.email, email_subject,
body, account.email_format)
def send_digests(self, frequency, subdomain):
"""Sends out a digest update for the specified frequency. Currently
available choices for the supplied frequency are ['daily', 'weekly',
'monthly']. Also removes pending alerts once an e-mail has been sent
and updates the account's next alert times.
"""
# Accounts with no daily/weekly/monthly subscriptions will be filtered
# out in this call as their next alert dates will always be set
# to an arbitrarily high constant date [see model.MAX_DATE].
query = Account.all().filter(
'next_%s_alert <' % frequency, datetime.datetime.now()).order(
'next_%s_alert' % frequency)
        accounts = [account for account in query if account.email is not None]
for account in accounts:
alerts_to_delete = []
unchanged_subjects = []
changed_subjects = {}
for subscription in Subscription.all().filter('user_email =',
account.email).filter('frequency =', frequency):
subject = Subject.get_by_key_name(subscription.subject_name)
pa = PendingAlert.get(frequency, account.email,
subscription.subject_name)
if pa:
values = fetch_updates(pa, subject)
changed_subjects[subscription.subject_name] = (
subject.get_value('title'), values)
alerts_to_delete.append(pa)
else:
unchanged_subjects.append(subject)
if not changed_subjects and (account.email_format == 'plain' or
not unchanged_subjects):
continue
email_data = Struct(
nickname=account.nickname or account.email,
domain=self.domain,
subdomain=subdomain,
changed_subjects=changed_subjects,
unchanged_subjects=unchanged_subjects
)
email_formatter = EMAIL_FORMATTERS[
subdomain][subject.type](account)
body = email_formatter.format_body(email_data)
email_subject = format_email_subject(subdomain, frequency)
try:
send_email(account.locale,
'%s-%s' % (subdomain, self.appspot_email),
account.email, email_subject,
body, account.email_format)
update_account_alert_time(account, frequency)
db.delete(alerts_to_delete)
db.put(account)
except OverQuotaError, message:
                # Catch and log the error here in order to avoid mass
                # duplication of the mail alerts task. If the system were left
                # to handle the error automatically, the combination of cron
                # jobs and re-created tasks would overflow the task queue.
logging.error(message)
if __name__ == '__main__':
utils.run([('/mail_alerts', MailAlerts)], debug=True)
|
|
# Copyright 2013 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The share snapshots api."""
from oslo_log import log
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
from manila.api.views import share_snapshots as snapshot_views
from manila import db
from manila import exception
from manila.i18n import _
from manila import share
LOG = log.getLogger(__name__)
class ShareSnapshotMixin(object):
"""Mixin class for Share Snapshot Controllers."""
def _update(self, *args, **kwargs):
db.share_snapshot_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.share_api.get_snapshot(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.share_api.delete_snapshot(*args, **kwargs)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['manila.context']
try:
snapshot = self.share_api.get_snapshot(context, id)
# Snapshot with no instances is filtered out.
            if snapshot.get('status') is None:
raise exc.HTTPNotFound()
except exception.NotFound:
raise exc.HTTPNotFound()
return self._view_builder.detail(req, snapshot)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['manila.context']
LOG.info("Delete snapshot with id: %s", id, context=context)
try:
snapshot = self.share_api.get_snapshot(context, id)
self.share_api.delete_snapshot(context, snapshot)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
def index(self, req):
"""Returns a summary list of snapshots."""
req.GET.pop('name~', None)
req.GET.pop('description~', None)
req.GET.pop('description', None)
return self._get_snapshots(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of snapshots."""
req.GET.pop('name~', None)
req.GET.pop('description~', None)
req.GET.pop('description', None)
return self._get_snapshots(req, is_detail=True)
def _get_snapshots(self, req, is_detail):
"""Returns a list of snapshots."""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
# Remove keys that are not related to share attrs
search_opts.pop('limit', None)
search_opts.pop('offset', None)
sort_key = search_opts.pop('sort_key', 'created_at')
sort_dir = search_opts.pop('sort_dir', 'desc')
# NOTE(vponomaryov): Manila stores in DB key 'display_name', but
# allows to use both keys 'name' and 'display_name'. It is leftover
# from Cinder v1 and v2 APIs.
if 'name' in search_opts:
search_opts['display_name'] = search_opts.pop('name')
if 'description' in search_opts:
search_opts['display_description'] = search_opts.pop(
'description')
# like filter
for key, db_key in (('name~', 'display_name~'),
('description~', 'display_description~')):
if key in search_opts:
search_opts[db_key] = search_opts.pop(key)
common.remove_invalid_options(context, search_opts,
self._get_snapshots_search_options())
snapshots = self.share_api.get_all_snapshots(
context,
search_opts=search_opts,
sort_key=sort_key,
sort_dir=sort_dir,
)
# Snapshots with no instances are filtered out.
snapshots = list(filter(lambda x: x.get('status') is not None,
snapshots))
limited_list = common.limited(snapshots, req)
if is_detail:
snapshots = self._view_builder.detail_list(req, limited_list)
else:
snapshots = self._view_builder.summary_list(req, limited_list)
return snapshots
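    # Illustrative example (added for clarity): a request such as
    #   GET /snapshots?name=snap1&status=available
    # reaches this method with search_opts == {'name': 'snap1',
    # 'status': 'available'}; the 'name' key is rewritten to 'display_name'
    # above before the snapshots are fetched via the share API.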
def _get_snapshots_search_options(self):
"""Return share snapshot search options allowed by non-admin."""
return ('display_name', 'status', 'share_id', 'size', 'display_name~',
'display_description~', 'display_description')
def update(self, req, id, body):
"""Update a snapshot."""
context = req.environ['manila.context']
if not body or 'snapshot' not in body:
raise exc.HTTPUnprocessableEntity()
snapshot_data = body['snapshot']
valid_update_keys = (
'display_name',
'display_description',
)
update_dict = {key: snapshot_data[key]
for key in valid_update_keys
if key in snapshot_data}
try:
snapshot = self.share_api.get_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
snapshot = self.share_api.snapshot_update(context, snapshot,
update_dict)
snapshot.update(update_dict)
return self._view_builder.detail(req, snapshot)
@wsgi.response(202)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['manila.context']
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
share_id = snapshot['share_id']
share = self.share_api.get(context, share_id)
# Verify that share can be snapshotted
if not share['snapshot_support']:
msg = _("Snapshot cannot be created from share '%s', because "
"share back end does not support it.") % share_id
LOG.error(msg)
raise exc.HTTPUnprocessableEntity(explanation=msg)
LOG.info("Create snapshot from share %s",
share_id, context=context)
# NOTE(rushiagr): v2 API allows name instead of display_name
if 'name' in snapshot:
snapshot['display_name'] = snapshot.get('name')
del snapshot['name']
# NOTE(rushiagr): v2 API allows description instead of
# display_description
if 'description' in snapshot:
snapshot['display_description'] = snapshot.get('description')
del snapshot['description']
new_snapshot = self.share_api.create_snapshot(
context,
share,
snapshot.get('display_name'),
snapshot.get('display_description'))
return self._view_builder.detail(
req, dict(new_snapshot.items()))
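    # Illustrative request body (added for clarity): a POST with
    #   {"snapshot": {"share_id": "<share-uuid>", "name": "snap1",
    #                 "description": "nightly snapshot"}}
    # is handled above; 'name' and 'description' are mapped to
    # 'display_name' and 'display_description' before the snapshot is created.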
class ShareSnapshotsController(ShareSnapshotMixin, wsgi.Controller,
wsgi.AdminActionsMixin):
"""The Share Snapshots API controller for the OpenStack API."""
resource_name = 'share_snapshot'
_view_builder_class = snapshot_views.ViewBuilder
def __init__(self):
super(ShareSnapshotsController, self).__init__()
self.share_api = share.API()
@wsgi.action('os-reset_status')
def snapshot_reset_status_legacy(self, req, id, body):
return self._reset_status(req, id, body)
@wsgi.action('os-force_delete')
def snapshot_force_delete_legacy(self, req, id, body):
return self._force_delete(req, id, body)
def create_resource():
return wsgi.Resource(ShareSnapshotsController())
|
|
"""Extracting and changing portions of the current line
All functions take cursor offset from the beginning of the line and the line of
Python code, and return None, or a tuple of the start index, end index, and the
word."""
from itertools import chain
from collections import namedtuple
from bpython.lazyre import LazyReCompile
current_word_re = LazyReCompile(r'[\w_][\w0-9._]*[(]?')
LinePart = namedtuple('LinePart', ['start', 'stop', 'word'])
def current_word(cursor_offset, line):
"""the object.attribute.attribute just before or under the cursor"""
pos = cursor_offset
matches = current_word_re.finditer(line)
start = pos
end = pos
word = None
for m in matches:
if m.start() < pos and m.end() >= pos:
start = m.start()
end = m.end()
word = m.group()
if word is None:
return None
return LinePart(start, end, word)
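# Illustrative example (added for clarity):
#   current_word(5, 'foo.bar baz') == LinePart(start=0, stop=7, word='foo.bar')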
current_dict_key_re = LazyReCompile(r'''[\w_][\w0-9._]*\[([\w0-9._(), '"]*)''')
def current_dict_key(cursor_offset, line):
"""If in dictionary completion, return the current key"""
matches = current_dict_key_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_dict_re = LazyReCompile(r'''([\w_][\w0-9._]*)\[([\w0-9._(), '"]*)''')
def current_dict(cursor_offset, line):
"""If in dictionary completion, return the dict that should be used"""
matches = current_dict_re.finditer(line)
for m in matches:
if m.start(2) <= cursor_offset and m.end(2) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_string_re = LazyReCompile(
'''(?P<open>(?:""")|"|(?:''\')|')(?:((?P<closed>.+?)(?P=open))|'''
'''(?P<unclosed>.+))''')
def current_string(cursor_offset, line):
"""If inside a string of nonzero length, return the string (excluding
quotes)
Weaker than bpython.Repl's current_string, because that checks that a
string is a string based on previous lines in the buffer."""
for m in current_string_re.finditer(line):
i = 3 if m.group(3) else 4
if m.start(i) <= cursor_offset and m.end(i) >= cursor_offset:
return LinePart(m.start(i), m.end(i), m.group(i))
return None
current_object_re = LazyReCompile(r'([\w_][\w0-9_]*)[.]')
def current_object(cursor_offset, line):
"""If in attribute completion, the object on which attribute should be
looked up."""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_re.finditer(word)
s = ''
for m in matches:
if m.end(1) + start < cursor_offset:
if s:
s += '.'
s += m.group(1)
if not s:
return None
return LinePart(start, start+len(s), s)
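# Illustrative example (added for clarity): with the cursor at the end of
# 'abc.def.xyz' (offset 11),
#   current_object(11, 'abc.def.xyz') == LinePart(start=0, stop=7, word='abc.def')
# i.e. the object to look the attribute up on, without the final part.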
current_object_attribute_re = LazyReCompile(r'([\w_][\w0-9_]*)[.]?')
def current_object_attribute(cursor_offset, line):
"""If in attribute completion, the attribute being completed"""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_attribute_re.finditer(word)
next(matches)
for m in matches:
if (m.start(1) + start <= cursor_offset and
m.end(1) + start >= cursor_offset):
return LinePart(m.start(1) + start, m.end(1) + start, m.group(1))
return None
current_from_import_from_re = LazyReCompile(
r'from ([\w0-9_.]*)(?:\s+import\s+([\w0-9_]+[,]?\s*)+)*')
def current_from_import_from(cursor_offset, line):
"""If in from import completion, the word after from
returns None if cursor not in or just after one of the two interesting
parts of an import: from (module) import (name1, name2)
"""
# TODO allow for as's
tokens = line.split()
if not ('from' in tokens or 'import' in tokens):
return None
matches = current_from_import_from_re.finditer(line)
for m in matches:
if ((m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or
(m.start(2) < cursor_offset and m.end(2) >= cursor_offset)):
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_from_import_import_re_1 = LazyReCompile(r'from\s([\w0-9_.]*)\s+import')
current_from_import_import_re_2 = LazyReCompile(r'([\w0-9_]+)')
current_from_import_import_re_3 = LazyReCompile(r'[,][ ]([\w0-9_]*)')
def current_from_import_import(cursor_offset, line):
"""If in from import completion, the word after import being completed
returns None if cursor not in or just after one of these words
"""
baseline = current_from_import_import_re_1.search(line)
if baseline is None:
return None
match1 = current_from_import_import_re_2.search(line[baseline.end():])
if match1 is None:
return None
matches = current_from_import_import_re_3.finditer(line[baseline.end():])
for m in chain((match1, ), matches):
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
return None
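# Illustrative example (added for clarity): with the cursor inside 'path'
# (offset 17) in 'from os import path, sep',
#   current_from_import_import(17, 'from os import path, sep')
# returns LinePart(start=15, stop=19, word='path').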
current_import_re_1 = LazyReCompile(r'import')
current_import_re_2 = LazyReCompile(r'([\w0-9_.]+)')
current_import_re_3 = LazyReCompile(r'[,][ ]([\w0-9_.]*)')
def current_import(cursor_offset, line):
    """If in a plain import statement, the module path under the cursor"""
# TODO allow for multiple as's
baseline = current_import_re_1.search(line)
if baseline is None:
return None
match1 = current_import_re_2.search(line[baseline.end():])
if match1 is None:
return None
matches = current_import_re_3.finditer(line[baseline.end():])
for m in chain((match1, ), matches):
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
current_method_definition_name_re = LazyReCompile(r"def\s+([a-zA-Z_][\w]*)")
def current_method_definition_name(cursor_offset, line):
"""The name of a method being defined"""
matches = current_method_definition_name_re.finditer(line)
for m in matches:
if (m.start(1) <= cursor_offset and m.end(1) >= cursor_offset):
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_single_word_re = LazyReCompile(r"(?<![.])\b([a-zA-Z_][\w]*)")
def current_single_word(cursor_offset, line):
"""the un-dotted word just before or under the cursor"""
matches = current_single_word_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
def current_dotted_attribute(cursor_offset, line):
"""The dotted attribute-object pair before the cursor"""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
if '.' in word[1:]:
return LinePart(start, end, word)
current_string_literal_attr_re = LazyReCompile(
"('''" +
r'''|"""|'|")''' +
r'''((?:(?=([^"'\\]+|\\.|(?!\1)["']))\3)*)\1[.]([a-zA-Z_]?[\w]*)''')
def current_string_literal_attr(cursor_offset, line):
"""The attribute following a string literal"""
matches = current_string_literal_attr_re.finditer(line)
for m in matches:
if m.start(4) <= cursor_offset and m.end(4) >= cursor_offset:
return LinePart(m.start(4), m.end(4), m.group(4))
return None
|
|
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._async_compat.util import AsyncUtil
from .._conf import (
TrustAll,
TrustStore,
)
from ..addressing import Address
from ..api import (
READ_ACCESS,
TRUST_ALL_CERTIFICATES,
TRUST_SYSTEM_CA_SIGNED_CERTIFICATES,
)
from ..conf import (
Config,
PoolConfig,
SessionConfig,
WorkspaceConfig,
)
from ..meta import (
deprecation_warn,
experimental,
unclosed_resource_warn,
)
class AsyncGraphDatabase:
"""Accessor for :class:`neo4j.Driver` construction.
"""
@classmethod
@AsyncUtil.experimental_async(
"neo4j async is in experimental phase. It might be removed or changed "
"at any time (including patch releases)."
)
def driver(cls, uri, *, auth=None, **config):
"""Create a driver.
:param uri: the connection URI for the driver, see :ref:`async-uri-ref` for available URIs.
:param auth: the authentication details, see :ref:`auth-ref` for available authentication details.
:param config: driver configuration key-word arguments, see :ref:`async-driver-configuration-ref` for available key-word arguments.
:rtype: AsyncNeo4jDriver or AsyncBoltDriver
"""
from ..api import (
DRIVER_BOLT,
DRIVER_NEO4j,
parse_neo4j_uri,
parse_routing_context,
SECURITY_TYPE_SECURE,
SECURITY_TYPE_SELF_SIGNED_CERTIFICATE,
URI_SCHEME_BOLT,
URI_SCHEME_BOLT_SECURE,
URI_SCHEME_BOLT_SELF_SIGNED_CERTIFICATE,
URI_SCHEME_NEO4J,
URI_SCHEME_NEO4J_SECURE,
URI_SCHEME_NEO4J_SELF_SIGNED_CERTIFICATE,
)
driver_type, security_type, parsed = parse_neo4j_uri(uri)
# TODO: 6.0 remove "trust" config option
if "trust" in config.keys():
if config["trust"] not in (TRUST_ALL_CERTIFICATES,
TRUST_SYSTEM_CA_SIGNED_CERTIFICATES):
from neo4j.exceptions import ConfigurationError
raise ConfigurationError(
"The config setting `trust` values are {!r}"
.format(
[
TRUST_ALL_CERTIFICATES,
TRUST_SYSTEM_CA_SIGNED_CERTIFICATES,
]
)
)
if ("trusted_certificates" in config.keys()
and not isinstance(config["trusted_certificates"],
TrustStore)):
raise ConnectionError(
"The config setting `trusted_certificates` must be of type "
"neo4j.TrustAll, neo4j.TrustCustomCAs, or"
"neo4j.TrustSystemCAs but was {}".format(
type(config["trusted_certificates"])
)
)
if (security_type in [SECURITY_TYPE_SELF_SIGNED_CERTIFICATE, SECURITY_TYPE_SECURE]
and ("encrypted" in config.keys()
or "trust" in config.keys()
or "trusted_certificates" in config.keys()
or "ssl_context" in config.keys())):
from neo4j.exceptions import ConfigurationError
# TODO: 6.0 remove "trust" from error message
raise ConfigurationError(
'The config settings "encrypted", "trust", '
'"trusted_certificates", and "ssl_context" can only be used '
"with the URI schemes {!r}. Use the other URI schemes {!r} "
"for setting encryption settings."
.format(
[
URI_SCHEME_BOLT,
URI_SCHEME_NEO4J,
],
[
URI_SCHEME_BOLT_SELF_SIGNED_CERTIFICATE,
URI_SCHEME_BOLT_SECURE,
URI_SCHEME_NEO4J_SELF_SIGNED_CERTIFICATE,
URI_SCHEME_NEO4J_SECURE,
]
)
)
if security_type == SECURITY_TYPE_SECURE:
config["encrypted"] = True
elif security_type == SECURITY_TYPE_SELF_SIGNED_CERTIFICATE:
config["encrypted"] = True
config["trusted_certificates"] = TrustAll()
if driver_type == DRIVER_BOLT:
if parse_routing_context(parsed.query):
deprecation_warn(
"Creating a direct driver (`bolt://` scheme) with routing "
"context (URI parameters) is deprecated. They will be "
"ignored. This will raise an error in a future release. "
'Given URI "{}"'.format(uri)
)
# TODO: 6.0 - raise instead of warning
# raise ValueError(
# 'Routing parameters are not supported with scheme '
# '"bolt". Given URI "{}".'.format(uri)
# )
return cls.bolt_driver(parsed.netloc, auth=auth, **config)
elif driver_type == DRIVER_NEO4j:
routing_context = parse_routing_context(parsed.query)
return cls.neo4j_driver(parsed.netloc, auth=auth, routing_context=routing_context, **config)
@classmethod
def bolt_driver(cls, target, *, auth=None, **config):
""" Create a driver for direct Bolt server access that uses
socket I/O and thread-based concurrency.
"""
from .._exceptions import (
BoltHandshakeError,
BoltSecurityError,
)
try:
return AsyncBoltDriver.open(target, auth=auth, **config)
except (BoltHandshakeError, BoltSecurityError) as error:
from neo4j.exceptions import ServiceUnavailable
raise ServiceUnavailable(str(error)) from error
@classmethod
def neo4j_driver(cls, *targets, auth=None, routing_context=None, **config):
""" Create a driver for routing-capable Neo4j service access
that uses socket I/O and thread-based concurrency.
"""
from neo4j._exceptions import (
BoltHandshakeError,
BoltSecurityError,
)
try:
return AsyncNeo4jDriver.open(*targets, auth=auth, routing_context=routing_context, **config)
except (BoltHandshakeError, BoltSecurityError) as error:
from neo4j.exceptions import ServiceUnavailable
raise ServiceUnavailable(str(error)) from error
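# Illustrative usage sketch (added for clarity; the URI and credentials below
# are placeholders, not values from this module):
#
#   import asyncio
#   from neo4j import AsyncGraphDatabase
#
#   async def main():
#       async with AsyncGraphDatabase.driver(
#               "neo4j://localhost:7687", auth=("neo4j", "password")) as driver:
#           await driver.verify_connectivity()
#
#   asyncio.run(main())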
class _Direct:
default_host = "localhost"
default_port = 7687
default_target = ":"
def __init__(self, address):
self._address = address
@property
def address(self):
return self._address
@classmethod
def parse_target(cls, target):
""" Parse a target string to produce an address.
"""
if not target:
target = cls.default_target
address = Address.parse(target, default_host=cls.default_host,
default_port=cls.default_port)
return address
class _Routing:
default_host = "localhost"
default_port = 7687
default_targets = ": :17601 :17687"
def __init__(self, initial_addresses):
self._initial_addresses = initial_addresses
@property
def initial_addresses(self):
return self._initial_addresses
@classmethod
def parse_targets(cls, *targets):
""" Parse a sequence of target strings to produce an address
list.
"""
targets = " ".join(targets)
if not targets:
targets = cls.default_targets
addresses = Address.parse_list(targets, default_host=cls.default_host, default_port=cls.default_port)
return addresses
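# Illustrative sketch (not part of the driver code): with the defaults above,
# an empty target falls back to localhost:7687, and the routing default
# ": :17601 :17687" expands to three initial router addresses on localhost.
# The exact objects returned come from neo4j's Address.parse / parse_list.
#
#     _Direct.parse_target("")    # -> localhost:7687
#     _Routing.parse_targets()    # -> localhost:7687, localhost:17601, localhost:17687
#     _Routing.parse_targets("db1:7687", "db2:7687")  # -> the two given hosts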
class AsyncDriver:
""" Base class for all types of :class:`neo4j.AsyncDriver`, instances of
which are used as the primary access point to Neo4j.
"""
#: Connection pool
_pool = None
#: Flag if the driver has been closed
_closed = False
def __init__(self, pool, default_workspace_config):
assert pool is not None
assert default_workspace_config is not None
self._pool = pool
self._default_workspace_config = default_workspace_config
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
def __del__(self):
if not self._closed:
unclosed_resource_warn(self)
# TODO: 6.0 - remove this
if not self._closed:
if not AsyncUtil.is_async_code:
deprecation_warn(
"Relying on AsyncDriver's destructor to close the session "
"is deprecated. Please make sure to close the session. "
"Use it as a context (`with` statement) or make sure to "
"call `.close()` explicitly. Future versions of the "
"driver will not close drivers automatically."
)
self.close()
@property
def encrypted(self):
"""Indicate whether the driver was configured to use encryption.
:rtype: bool"""
return bool(self._pool.pool_config.encrypted)
def session(self, **config):
"""Create a session, see :ref:`async-session-construction-ref`
:param config: session configuration key-word arguments,
see :ref:`async-session-configuration-ref` for available key-word
arguments.
:returns: new :class:`neo4j.AsyncSession` object
"""
raise NotImplementedError
async def close(self):
""" Shut down, closing any open connections in the pool.
"""
await self._pool.close()
self._closed = True
# TODO: 6.0 - remove config argument
async def verify_connectivity(self, **config):
"""Verify that the driver can establish a connection to the server.
This verifies that the driver can establish a read connection to a
remote server or a cluster. Some data will be exchanged.
.. note::
Even if this method raises an exception, the driver still needs to
be closed via :meth:`close` to free up all resources.
:raises DriverError: if the driver cannot connect to the remote.
Use the exception to further understand the cause of the
connectivity problem.
.. versionchanged:: 5.0 The ``config`` parameters will be removed in
version 6.0. They have no effect starting with version 5.0.
"""
if config:
deprecation_warn(
"verify_connectivity() will not accept any configuration "
"parameters starting with version 6.0."
)
await self.get_server_info()
async def get_server_info(self):
"""Get information about the connected Neo4j server.
Try to establish a working read connection to the remote server or a
member of a cluster and exchange some data. Then return the contacted
server's information.
In a cluster, there is no guarantee about which server will be
contacted.
.. note::
Even if this method raises an exception, the driver still needs to
be closed via :meth:`close` to free up all resources.
:rtype: ServerInfo
:raises DriverError: if the driver cannot connect to the remote.
Use the exception to further understand the cause of the
connectivity problem.
.. versionadded:: 5.0
"""
async with self.session() as session:
return await session._get_server_info()
@experimental("Feature support query, based on Bolt protocol version and Neo4j server version will change in the future.")
async def supports_multi_db(self):
""" Check if the server or cluster supports multi-databases.
:return: Returns true if the server or cluster the driver connects to supports multi-databases, otherwise false.
:rtype: bool
.. note::
Feature support query, based on Bolt protocol version and Neo4j
server version, will change in the future.
"""
async with self.session() as session:
await session._connect(READ_ACCESS)
return session._connection.supports_multiple_databases
class AsyncBoltDriver(_Direct, AsyncDriver):
""":class:`.AsyncBoltDriver` is instantiated for ``bolt`` URIs and
addresses a single database machine. This may be a standalone server or
could be a specific member of a cluster.
Connections established by a :class:`.AsyncBoltDriver` are always made to
the exact host and port detailed in the URI.
This class is not supposed to be instantiated externally. Use
:meth:`AsyncGraphDatabase.driver` instead.
"""
@classmethod
def open(cls, target, *, auth=None, **config):
"""
:param target:
:param auth:
:param config: The values that can be specified are found in :class:`neo4j.PoolConfig` and :class:`neo4j.WorkspaceConfig`
:return:
:rtype: :class:`neo4j.AsyncBoltDriver`
"""
from .io import AsyncBoltPool
address = cls.parse_target(target)
pool_config, default_workspace_config = Config.consume_chain(config, PoolConfig, WorkspaceConfig)
pool = AsyncBoltPool.open(address, auth=auth, pool_config=pool_config, workspace_config=default_workspace_config)
return cls(pool, default_workspace_config)
def __init__(self, pool, default_workspace_config):
_Direct.__init__(self, pool.address)
AsyncDriver.__init__(self, pool, default_workspace_config)
self._default_workspace_config = default_workspace_config
def session(self, **config):
"""
:param config: The values that can be specified are found in :class:`neo4j.SessionConfig`
:return:
:rtype: :class:`neo4j.AsyncSession`
"""
from .work import AsyncSession
session_config = SessionConfig(self._default_workspace_config, config)
SessionConfig.consume(config) # Consume the config
return AsyncSession(self._pool, session_config)
class AsyncNeo4jDriver(_Routing, AsyncDriver):
""":class:`.AsyncNeo4jDriver` is instantiated for ``neo4j`` URIs. The
routing behaviour works in tandem with Neo4j's `Causal Clustering
<https://neo4j.com/docs/operations-manual/current/clustering/>`_
feature by directing read and write behaviour to appropriate
cluster members.
This class is not supposed to be instantiated externally. Use
:meth:`AsyncGraphDatabase.driver` instead.
"""
@classmethod
def open(cls, *targets, auth=None, routing_context=None, **config):
from .io import AsyncNeo4jPool
addresses = cls.parse_targets(*targets)
pool_config, default_workspace_config = Config.consume_chain(config, PoolConfig, WorkspaceConfig)
pool = AsyncNeo4jPool.open(*addresses, auth=auth, routing_context=routing_context, pool_config=pool_config, workspace_config=default_workspace_config)
return cls(pool, default_workspace_config)
def __init__(self, pool, default_workspace_config):
_Routing.__init__(self, pool.get_default_database_initial_router_addresses())
AsyncDriver.__init__(self, pool, default_workspace_config)
def session(self, **config):
from .work import AsyncSession
session_config = SessionConfig(self._default_workspace_config, config)
SessionConfig.consume(config) # Consume the config
return AsyncSession(self._pool, session_config)
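# Usage sketch (illustrative only; URI, credentials and query are
# placeholders): drivers are normally obtained through
# AsyncGraphDatabase.driver() and used as async context managers, which
# guarantees close() is called.
#
#     import asyncio
#     import neo4j
#
#     async def main():
#         driver = neo4j.AsyncGraphDatabase.driver(
#             "neo4j://localhost:7687", auth=("neo4j", "password"))
#         async with driver:
#             async with driver.session() as session:
#                 result = await session.run("RETURN 1 AS x")
#                 record = await result.single()
#                 print(record["x"])
#
#     asyncio.run(main())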
|
|
"""Bayesian variant calling with FreeBayes.
https://github.com/ekg/freebayes
"""
import os
import sys
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bedutils, ploidy, vcfutils
from bcbio.variation.vcfutils import (get_paired_bams, is_paired_analysis,
move_vcf)
def region_to_freebayes(region):
if isinstance(region, (list, tuple)):
chrom, start, end = region
return "%s:%s..%s" % (chrom, start, end)
else:
return region
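# Example (sketch): region tuples are rendered in FreeBayes' "chrom:start..end"
# region syntax, while plain string regions pass through unchanged.
#
#     region_to_freebayes(("chr1", 1000, 2000))  # -> "chr1:1000..2000"
#     region_to_freebayes("chr1")                # -> "chr1"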
def _freebayes_options_from_config(items, config, out_file, region=None):
"""Prepare standard options from configuration input.
Input BED target files are merged to avoid overlapping regions which
cause FreeBayes to call multiple times.
Checks for empty sets of target regions after filtering for high depth,
in which case we should skip the FreeBayes run.
"""
opts = ["--genotype-qualities", "--strict-vcf"]
cur_ploidy = ploidy.get_ploidy(items, region)
base_ploidy = ploidy.get_ploidy(items)
opts += ["--ploidy", str(cur_ploidy)]
# Adjust min fraction when trying to call more sensitively in certain
# regions. This is primarily meant for pooled mitochondrial calling.
if (isinstance(region, (list, tuple)) and chromhacks.is_mitochondrial(region[0])
and cur_ploidy >= base_ploidy and "--min-alternate-fraction" not in opts and "-F" not in opts):
opts += ["--min-alternate-fraction", "0.01"]
variant_regions = bedutils.merge_overlaps(bedutils.population_variant_regions(items), items[0])
# Produce gVCF output
if any("gvcf" in dd.get_tools_on(d) for d in items):
opts += ["--gvcf", "--gvcf-chunk", "50000"]
no_target_regions = False
target = shared.subset_variant_regions(variant_regions, region, out_file, items)
if target:
if isinstance(target, basestring) and os.path.isfile(target):
if any(tz.get_in(["config", "algorithm", "coverage_interval"], x, "").lower() == "genome"
for x in items):
target = shared.remove_highdepth_regions(target, items)
if os.path.getsize(target) == 0:
no_target_regions = True
opts += ["--targets", target]
else:
opts += ["--region", region_to_freebayes(target)]
resources = config_utils.get_resources("freebayes", config)
if resources.get("options"):
opts += resources["options"]
return opts, no_target_regions
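# Illustrative example (paths and values are hypothetical): for a diploid
# germline sample restricted to a BED file of target regions, the returned
# value would look roughly like
#
#     (["--genotype-qualities", "--strict-vcf", "--ploidy", "2",
#       "--targets", "/path/to/regions-subset.bed"], False)
#
# with the second element True only when high-depth filtering left no target
# regions to call.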
def _add_somatic_opts(opts, paired):
"""Add somatic options to current set. See _run_freebayes_paired for references.
"""
if "--min-alternate-fraction" not in opts and "-F" not in opts:
# add minimum reportable allele frequency
# FreeBayes defaults to 20%, but use 10% by default for the
# tumor case
min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
"min_allele_fraction"), 10)) / 100.0
opts += " --min-alternate-fraction %s" % min_af
# Recommended settings for cancer calling
opts += (" --pooled-discrete --pooled-continuous "
"--report-genotype-likelihood-max --allele-balance-priors-off")
return opts
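# Worked example (sketch): with the default min_allele_fraction of 10, the
# options appended to the existing string are
#
#     " --min-alternate-fraction 0.1 --pooled-discrete --pooled-continuous"
#     " --report-genotype-likelihood-max --allele-balance-priors-off"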
def run_freebayes(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run FreeBayes variant calling, either paired tumor/normal or germline calling.
"""
if is_paired_analysis(align_bams, items):
paired = get_paired_bams(align_bams, items)
if not paired.normal_bam:
call_file = _run_freebayes_caller(align_bams, items, ref_file,
assoc_files, region, out_file, somatic=paired)
else:
call_file = _run_freebayes_paired([paired.tumor_bam, paired.normal_bam],
[paired.tumor_data, paired.normal_data],
ref_file, assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_freebayes_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None, somatic=None):
"""Detect SNPs and indels with FreeBayes.
Performs post-filtering to remove very low quality variants which
can cause issues feeding into GATK. Breaks variants into individual
allelic primitives for analysis and evaluation.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
freebayes = config_utils.get_program("freebayes", config)
input_bams = " ".join("-b %s" % x for x in align_bams)
opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region)
if no_target_regions:
vcfutils.write_empty_vcf(tx_out_file, config, samples=[dd.get_sample_name(d) for d in items])
else:
opts = " ".join(opts)
# Recommended options from 1000 genomes low-complexity evaluation
# https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ
opts += " --min-repeat-entropy 1"
# Remove partial observations, which cause a preference for heterozygote calls
# https://github.com/ekg/freebayes/issues/234#issuecomment-205331765
opts += " --no-partial-observations"
if somatic:
opts = _add_somatic_opts(opts, somatic)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
# For multi-sample outputs, ensure consistent order
samples = ("-s" + ",".join([dd.get_sample_name(d) for d in items])) if len(items) > 1 else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
cmd = ("{freebayes} -f {ref_file} {opts} {input_bams} "
"""| bcftools filter -i 'ALT="<*>" || QUAL > 5' """
"| {fix_ambig} | bcftools view {samples} -a - | "
"{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | "
"vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | "
"vt normalize -n -r {ref_file} -q - | vcfuniqalleles | vt uniq - 2> /dev/null "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {})
return out_file
def _run_freebayes_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with FreeBayes for paired tumor/normal samples.
Sources of options for FreeBayes:
mailing list: https://groups.google.com/d/msg/freebayes/dTWBtLyM4Vs/HAK_ZhJHguMJ
mailing list: https://groups.google.com/forum/#!msg/freebayes/LLH7ZfZlVNs/63FdD31rrfEJ
speedseq: https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L916
sga/freebayes: https://github.com/jts/sga-extra/blob/7e28caf71e8107b697f9be7162050e4fa259694b/
sga_generate_varcall_makefile.pl#L299
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
paired = get_paired_bams(align_bams, items)
assert paired.normal_bam, "Require normal BAM for FreeBayes paired calling and filtering"
freebayes = config_utils.get_program("freebayes", config)
opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region)
if no_target_regions:
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
opts = " ".join(opts)
opts += " --min-repeat-entropy 1"
opts += " --no-partial-observations"
opts = _add_somatic_opts(opts, paired)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
# For multi-sample outputs, ensure consistent order
samples = ("-s " + ",".join([dd.get_sample_name(d) for d in items])) if len(items) > 1 else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
bcbio_py = sys.executable
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
cl = ("{freebayes} -f {ref_file} {opts} "
"{paired.tumor_bam} {paired.normal_bam} "
"""| bcftools filter -i 'ALT="<*>" || QUAL > 5' """
"""| {bcbio_py} -c 'from bcbio.variation import freebayes; """
"""freebayes.call_somatic("{paired.tumor_name}", "{paired.normal_name}")' """
"| {fix_ambig} | bcftools view {samples} -a - | "
"{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | "
"vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | "
"vt normalize -n -r {ref_file} -q - | vcfuniqalleles | vt uniq - 2> /dev/null "
"{compress_cmd} > {tx_out_file}")
do.run(cl.format(**locals()), "Genotyping paired variants with FreeBayes", {})
return out_file
# ## Filtering
def _check_lods(parts, tumor_thresh, normal_thresh, indexes):
"""Ensure likelihoods for tumor and normal pass thresholds.
Skipped if no FreeBayes GL annotations available.
"""
try:
gl_index = parts[8].split(":").index("GL")
except ValueError:
return True
try:
tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",") if x != "."]
if tumor_gls:
tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
else:
tumor_lod = -1.0
# No GL information, no tumor call (so fail it)
except IndexError:
tumor_lod = -1.0
try:
normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",") if x != "."]
if normal_gls:
normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
else:
normal_lod = normal_thresh
# No GL information, no normal call (so pass it)
except IndexError:
normal_lod = normal_thresh
return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
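# Worked example (illustrative GL values): with FORMAT "GT:GL", a tumor field
# "0/1:-10.0,-1.0,-20.0" gives tumor_lod = max(GL[i] - GL[0]) = -1.0 - (-10.0) = 9.0,
# and a normal field "0/0:-0.2,-9.0,-30.0" gives
# normal_lod = min(GL[0] - GL[i]) = -0.2 - (-9.0) = 8.8, so both pass the
# default 3.5 thresholds used by call_somatic below.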
def _check_freqs(parts, indexes):
"""Ensure frequency of tumor to normal passes a reasonable threshold.
Avoids calling low frequency tumor variants that are also present at low frequency
in normals, which indicates contamination or a persistent error.
"""
thresh_ratio = 2.7
try: # FreeBayes
ao_index = parts[8].split(":").index("AO")
ro_index = parts[8].split(":").index("RO")
except ValueError:
ao_index, ro_index = None, None
try: # VarDict
af_index = parts[8].split(":").index("AF")
except ValueError:
af_index = None
if af_index is None and ao_index is None:
# okay to skip if a gVCF record
if parts[4].find("<*>") == -1:
raise NotImplementedError("Unexpected format annotations: %s" % parts[8])
def _calc_freq(item):
try:
if ao_index is not None and ro_index is not None:
ao = sum([int(x) for x in item.split(":")[ao_index].split(",")])
ro = int(item.split(":")[ro_index])
freq = ao / float(ao + ro)
elif af_index is not None:
freq = float(item.split(":")[af_index])
else:
freq = 0.0
except (IndexError, ValueError, ZeroDivisionError):
freq = 0.0
return freq
tumor_freq, normal_freq = _calc_freq(parts[indexes["tumor"]]), _calc_freq(parts[indexes["normal"]])
return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
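# Worked example (illustrative counts): with FreeBayes AO/RO fields, a tumor
# sample with AO=8 and RO=12 has freq 8 / (8 + 12) = 0.4 and a normal with
# AO=0, RO=30 has freq 0.0, which passes (0.0 <= 0.001). A normal freq of 0.2
# would fail, since 0.2 > 0.4 / 2.7.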
def remove_missingalt(line):
"""Remove lines that are missing an alternative allele.
During cleanup of extra alleles, bcftools has an issue in complicated cases
with duplicate alleles and will end up stripping all alternative alleles.
This removes those lines to avoid issues downstream.
"""
if not line.startswith("#"):
parts = line.split("\t")
if parts[4] == ".":
return None
return line
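# Example (sketch): records whose ALT column is "." are dropped, everything
# else passes through unchanged.
#
#     remove_missingalt("chr1\t100\t.\tA\t.\t...")  # -> None
#     remove_missingalt("chr1\t100\t.\tA\tG\t...")  # -> the input line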
def call_somatic(tumor_name, normal_name):
"""Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.
Works from stdin and writes to stdout, finding positions of tumor and normal samples.
Uses a MuTect-like somatic filter based on the implementation in speedseq:
https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62
Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
For tumors, we retrieve the best likelihood to not be reference (the first GL) and
for normal, the best likelihood to be reference.
After calculating the likelihoods, we compare these to thresholds to pass variants
at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.
We also check that the frequency of the tumor exceeds the frequency of the normal by
a threshold to avoid calls that are low frequency in both tumor and normal. This supports
both FreeBayes and VarDict output frequencies.
"""
# Thresholds are like phred scores, so 3.5 = phred35
tumor_thresh, normal_thresh = 3.5, 3.5
new_headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">\n',
('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
'or phred likelihoods: tumor: %s, normal %s.">\n')
% (int(tumor_thresh * 10), int(normal_thresh * 10))]
def _output_filter_line(line, indexes):
parts = line.split("\t")
if _check_lods(parts, tumor_thresh, normal_thresh, indexes) and _check_freqs(parts, indexes):
parts[7] = parts[7] + ";SOMATIC"
else:
if parts[6] in set([".", "PASS"]):
parts[6] = "REJECT"
else:
parts[6] += ";REJECT"
line = "\t".join(parts)
sys.stdout.write(line)
def _write_header(header):
for hline in header[:-1] + new_headers + [header[-1]]:
sys.stdout.write(hline)
header = []
indexes = None
for line in sys.stdin:
if not indexes:
if line.startswith("#"):
header.append(line)
else:
parts = header[-1].rstrip().split("\t")
indexes = {"tumor": parts.index(tumor_name), "normal": parts.index(normal_name)}
_write_header(header)
_output_filter_line(line, indexes)
else:
_output_filter_line(line, indexes)
# no calls, only output the header
if not indexes:
_write_header(header)
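# Usage sketch: call_somatic runs as a streaming filter inside the paired
# FreeBayes pipeline above (see the `bcbio_py -c ...` fragment in
# _run_freebayes_paired), reading VCF records from stdin and writing to stdout:
#
#     ... | python -c 'from bcbio.variation import freebayes; \
#                      freebayes.call_somatic("tumor_sample", "normal_sample")' | ...
#
# where "tumor_sample" and "normal_sample" are placeholders for the VCF column
# headers of the tumor and normal samples.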
def _clean_freebayes_output(line):
"""Clean FreeBayes output to make post-processing with GATK happy.
XXX Not applied on recent versions which fix issues to be more compatible
with bgzip output, but retained in case of need.
- Remove lines from FreeBayes outputs where REF/ALT are identical:
2 22816178 . G G 0.0339196
or there are multiple duplicate alleles:
4 60594753 . TGAAA T,T
- Remove Type=Int specifications, which are not valid VCF and which GATK
chokes on.
"""
if line.startswith("#"):
line = line.replace("Type=Int,D", "Type=Integer,D")
return line
else:
parts = line.split("\t")
alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
if len(alleles) == len(set(alleles)):
return line
return None
def clean_vcf_output(orig_file, clean_fn, config, name="clean"):
"""Provide framework to clean a file in-place, with the specified clean
function.
"""
base, ext = utils.splitext_plus(orig_file)
out_file = "{0}-{1}{2}".format(base, name, ext)
if not utils.file_exists(out_file):
with open(orig_file) as in_handle:
with file_transaction(config, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
update_line = clean_fn(line)
if update_line:
out_handle.write(update_line)
move_vcf(orig_file, "{0}.orig".format(orig_file))
move_vcf(out_file, orig_file)
with open(out_file, "w") as out_handle:
out_handle.write("Moved to {0}".format(orig_file))
|
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from manilaclient.openstack.common.apiclient import exceptions as manila_ex
import mock
import testtools
from sahara import exceptions
from sahara.service import shares
from sahara.tests.unit import base
_NAMENODE_IPS = ['192.168.122.3', '192.168.122.4']
_DATANODE_IPS = ['192.168.122.5', '192.168.122.6', '192.168.122.7']
class _FakeShare(object):
def __init__(self, id='12345678-1234-1234-1234-123456789012',
share_proto='NFS',
export_location='192.168.122.1:/path',
access_list=None):
self.id = id
self.share_proto = share_proto
self.export_location = export_location
self.allow = mock.Mock()
self.deny = mock.Mock()
self.access_list = mock.Mock(return_value=access_list or [])
def _mock_node_group(ips, share_list):
# Returns a mocked node group and a list of mocked
# execute_command functions for its instances.
execute_mocks = [mock.Mock(return_value=(None, "centos")) for ip in ips]
get_id = mock.Mock(return_value=uuid.uuid4())
instances = [
mock.Mock(
internal_ip=ip,
remote=mock.Mock(
return_value=mock.Mock(
__enter__=mock.Mock(
return_value=mock.Mock(
execute_command=execute_mocks[index])),
__exit__=mock.Mock())))
for index, ip in enumerate(ips)]
node_group = mock.Mock(instances=instances,
shares=share_list,
__getitem__=get_id)
return node_group, execute_mocks
def _setup_calls():
return [
mock.call('lsb_release -is'),
mock.call('rpm -q nfs-utils || yum install -y nfs-utils',
run_as_root=True)]
def _expected_calls(local_path, remote_path, access_argument):
return [
mock.call('mkdir -p %s' % local_path, run_as_root=True),
mock.call("mount | grep '%(remote_path)s' | grep '%(local_path)s' | "
"grep nfs || mount -t nfs %(access_argument)s "
"%(remote_path)s %(local_path)s" %
{
"local_path": local_path,
"remote_path": remote_path,
"access_argument": access_argument
},
run_as_root=True)]
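# Illustrative example: for local_path='/mnt/localpath',
# remote_path='192.168.122.1:/path' and access_argument='-w', the second
# expected call checks for an existing mount and otherwise mounts read-write:
#
#     mount | grep '192.168.122.1:/path' | grep '/mnt/localpath' | grep nfs \
#         || mount -t nfs -w 192.168.122.1:/path /mnt/localpath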
class TestShares(base.SaharaTestCase):
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_mount_nfs_shares_to_ng(self, f_manilaclient, f_context):
share = _FakeShare()
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
permissions = [mock.call('ip', ip, 'rw') for ip in _NAMENODE_IPS]
share.allow.assert_has_calls(permissions, any_order=True)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-w'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_mount_nfs_shares_to_cluster(self, f_manilaclient, f_context):
global_share = _FakeShare()
namenode_only_share = _FakeShare(
id='DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
export_location='192.168.122.2:/path')
all_shares = {share.id: share for share in
(global_share, namenode_only_share)}
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(
side_effect=lambda x: all_shares[x])))
namenode_group, namenode_executors = _mock_node_group(
['192.168.122.3', '192.168.122.4'],
[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
},
{
'id': 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'
}
])
datanode_group, datanode_executors = _mock_node_group(
['192.168.122.5', '192.168.122.6', '192.168.122.7'], [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group],
shares=[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'ro',
'path': '/mnt/somanylocalpaths'
}
])
shares.mount_shares(cluster)
all_permissions = [mock.call('ip', ip, 'ro')
for ip in _NAMENODE_IPS + _DATANODE_IPS]
global_share.allow.assert_has_calls(all_permissions, any_order=True)
namenode_permissions = [mock.call('ip', ip, 'rw')
for ip in _NAMENODE_IPS]
namenode_only_share.allow.assert_has_calls(namenode_permissions,
any_order=True)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/somanylocalpaths',
'192.168.122.1:/path', '-r') +
_expected_calls('/mnt/DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
'192.168.122.2:/path', '-w'),
any_order=True)
self.assertEqual(6, executor.call_count)
for executor in datanode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/somanylocalpaths',
'192.168.122.1:/path', '-r'))
self.assertEqual(4, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_share_does_not_exist(self, f_manilaclient, f_context):
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(
side_effect=manila_ex.NotFound)))
namenode_group, namenode_executors = _mock_node_group(
['192.168.122.3', '192.168.122.4'],
[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
},
{
'id': 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'
}
])
datanode_group, datanode_executors = _mock_node_group(
['192.168.122.5', '192.168.122.6', '192.168.122.7'], [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group],
shares=[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'ro',
'path': '/mnt/somanylocalpaths'
}
])
with testtools.ExpectedException(exceptions.NotFoundException):
shares.mount_shares(cluster)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_acl_exists_unexpected_type(self, f_manilaclient, f_context):
share = _FakeShare(access_list=[mock.Mock(
access_level='wat', access_to=ip, access_type='ip')
for ip in _NAMENODE_IPS])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
self.assertEqual(0, share.allow.call_count)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-w'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_acl_exists_no_recreate(self, f_manilaclient, f_context):
share = _FakeShare(access_list=[mock.Mock(
access_level='rw', access_to=ip, access_type='ip')
for ip in _NAMENODE_IPS])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'ro',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
self.assertEqual(0, share.allow.call_count)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-r'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_acl_exists_recreate(self, f_manilaclient, f_context):
share = _FakeShare(access_list=[mock.Mock(
access_level='ro', access_to=ip, access_type='ip', id="access_id")
for ip in _NAMENODE_IPS])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
namenode_denials = [mock.call('access_id')
for ip in _NAMENODE_IPS]
share.deny.assert_has_calls(namenode_denials)
namenode_permissions = [mock.call('ip', ip, 'rw')
for ip in _NAMENODE_IPS]
share.allow.assert_has_calls(namenode_permissions,
any_order=True)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-w'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
def test_get_share_path(self):
share_list = [
{'id': 'the_share_id',
'path': '/mnt/mymountpoint'},
{'id': 'the_share_id',
'path': '/mnt/othermountpoint'},
{'id': '123456',
'path': '/mnt/themountpoint'}
]
url = 'manila://the_share_id/the_path'
path = shares.get_share_path(url, share_list)
self.assertEqual("/mnt/mymountpoint/the_path", path)
share_list.pop(0)
path = shares.get_share_path(url, share_list)
self.assertEqual("/mnt/othermountpoint/the_path", path)
share_list.pop(0)
path = shares.get_share_path(url, share_list)
self.assertIsNone(path)
@mock.patch('sahara.utils.openstack.manila.client')
def test_get_share_path_default(self, f_manilaclient):
share_list = [
{'id': 'i_have_no_mnt'}
]
share = _FakeShare(share_list[0]['id'])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
url = 'manila://i_have_no_mnt/the_path'
path = shares.get_share_path(url, share_list)
self.assertEqual("/mnt/i_have_no_mnt/the_path", path)
|
|
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import mockfs
import os
import pytest
import sys
import jsonschema
from jimmy import cli
from mock import call
from click.testing import CliRunner
from jimmy.lib.common import yaml_reader
from jimmy.tests import base
modules_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
jimmy_dir = os.path.dirname(modules_dir)
throttle_schema_path = os.path.join(modules_dir, 'throttle', 'resources', 'schema.yaml')
jenkins_yaml_path = os.path.join(jimmy_dir, 'sample', 'input', 'jenkins.yaml')
class TestThrottleModule(base.TestCase):
def setup_method(self, method):
self.runner = CliRunner()
def teardown_method(self, method):
mockfs.restore_builtins()
@mock.patch('jimmy.lib.core.load_py_modules')
@mock.patch('subprocess.call')
def test_cli_call(self, mock_subp, mock_modules):
with open(throttle_schema_path, 'r') as f:
mock_throttle_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({os.path.join(jimmy_dir, 'lib', 'schema.yaml'): self.jimmy_schema,
os.path.join(jimmy_dir, 'jimmy.yaml'): self.mock_jimmy_yaml,
throttle_schema_path: mock_throttle_schema,
jenkins_yaml_path: '\n'.join(
[
'jenkins:',
' throttle:',
' categories:',
' - category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1',
' - throttled_node_label: slave-label2',
' max_concurrent_per_labeled: 1',
' - category_name: category2',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0'
])
})
sys.path.insert(0, modules_dir)
import throttle
import read_source
sys.path.pop(0)
mock_modules.return_value = [throttle, read_source]
os.chdir(jimmy_dir)
self.runner.invoke(cli)
calls = [call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'clearCategories'],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'makeThrottleCategory',
'category1', '1', '0', 'slave-label1,slave-label2', '1,1'],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'makeThrottleCategory',
'category2', '1', '0', '', ''],
shell=False)
]
mock_subp.assert_has_calls(calls, any_order=True)
assert 3 == mock_subp.call_count, "subprocess call count should equal 3"
class TestThrottleSchema(object):
def setup_method(self, method):
with open(throttle_schema_path, 'r') as f:
mock_throttle_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({throttle_schema_path: mock_throttle_schema})
self.schema = yaml_reader.read(throttle_schema_path)
def teardown_method(self, method):
mockfs.restore_builtins()
def test_valid_repo_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_validation_fail_if_category_name_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: 123',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_max_total_concurrent_builds_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: test',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_validation_fail_if_max_concurrent_builds_per_node_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: test',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_validation_fail_if_throttled_node_label_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: 123',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_max_concurrent_per_labeled_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_validation_fail_for_category_name_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'category_name' is a required property"
def test_validation_fail_for_max_total_conc_builds_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_total_concurrent_builds' is a required property"
def test_validation_fail_for_max_conc_builds_per_node_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_concurrent_builds_per_node' is a required property"
def test_validation_fail_for_throttled_node_label_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'throttled_node_label' is a required property"
def test_validation_fail_for_max_concurrent_per_labeled_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_concurrent_per_labeled' is a required property"
def test_validation_fail_if_categories_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_if_max_per_labeled_node_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_for_categories_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
def test_validation_fail_for_max_per_labeled_node_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
|
|
import hashlib
import random
import re
import unicodedata
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotFound
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.conf import settings
from django.views.generic import View, FormView, ListView, DetailView, TemplateView
from django.http import HttpResponseRedirect
import celery
from gargoyle import gargoyle
from braces.views import LoginRequiredMixin
from utils import installed
from eve_api.models import EVEAccount, EVEPlayerCharacter
from eve_api.tasks import import_apikey, import_apikey_result, update_user_access
from eve_proxy.models import ApiAccessLog
from sso.models import ServiceAccount, Service, SSOUser, ExistingUser, ServiceError, SSOUserIPAddress
from sso.forms import UserServiceAccountForm, ServiceAccountResetForm, UserLookupForm, APIPasswordForm, EmailChangeForm, PrimaryCharacterForm, UserNoteForm
class ProfileView(LoginRequiredMixin, TemplateView):
template_name = 'sso/profile.html'
def get_profile(self, user):
try:
profile = user.get_profile()
except SSOUser.DoesNotExist:
profile = SSOUser.objects.create(user=user)
return profile
def get(self, request, *args, **kwargs):
self.profile = self.get_profile(request.user)
if self.profile.api_service_password is None or self.profile.api_service_password == '':
messages.info(request, "Please set a External Services Password before continuing.")
return HttpResponseRedirect(reverse('sso-apipassword'))
if self.profile.primary_character is None and EVEPlayerCharacter.objects.filter(eveaccount__user=request.user).count():
return HttpResponseRedirect(reverse('sso-primarycharacterupdate'))
return super(ProfileView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(ProfileView, self).get_context_data(**kwargs)
ctx.update({
'profile': self.profile,
'available_services': Service.objects.filter(groups__in=self.request.user.groups.all()).exclude(id__in=ServiceAccount.objects.filter(user=self.request.user).values('service')).count()
})
return ctx
@login_required
def service_add(request):
""" Add a service to a user's account """
clsform = UserServiceAccountForm(request.user)
if request.method == 'POST':
form = clsform(request.POST)
if form.is_valid():
acc = ServiceAccount(user=request.user, service=form.cleaned_data['service'])
acc.character = form.cleaned_data['character']
if acc.service.settings['require_password']:
if settings.GENERATE_SERVICE_PASSWORD:
acc.password = hashlib.sha1('%s%s%s' % (form.cleaned_data['character'].name, settings.SECRET_KEY, random.randint(0, 2147483647))).hexdigest()
else:
acc.password = form.cleaned_data['password']
else:
acc.password = None
if acc.service.settings['use_auth_username']:
username = request.user.username
else:
# Decode unicode and remove invalid characters
username = re.sub('[^a-zA-Z0-9_-]+', '', unicodedata.normalize('NFKD', acc.character.name).encode('ASCII', 'ignore'))
if acc.service.api_class.check_user(username):
error = "Username already exists on the target service, please contact an admin."
else:
ret = acc.service.api_class.add_user(username, acc.password, user=request.user, character=acc.character)
if ret:
acc.service_uid = ret['username']
acc.save()
error = None
else:
error = "Error creating account on the service, please retry or contact an admin if the error persists."
return render_to_response('sso/serviceaccount/created.html', locals(), context_instance=RequestContext(request))
else:
availserv = Service.objects.filter(groups__in=request.user.groups.all()).exclude(id__in=ServiceAccount.objects.filter(user=request.user).values('service')).count()
if not availserv:
return render_to_response('sso/serviceaccount/noneavailable.html', locals(), context_instance=RequestContext(request))
else:
form = clsform() # An unbound form
return render_to_response('sso/serviceaccount/add.html', locals(), context_instance=RequestContext(request))
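# Worked example (illustrative name): when the service does not use the auth
# username, the character name is normalised and stripped of characters
# outside [a-zA-Z0-9_-] before being handed to the service API, e.g.
#
#     name = u'Zo\xeb Test-1'  # "Zoë Test-1"
#     re.sub('[^a-zA-Z0-9_-]+', '',
#            unicodedata.normalize('NFKD', name).encode('ASCII', 'ignore'))
#     # -> 'ZoeTest-1'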
@login_required
def service_del(request, serviceid=0, confirm_template='sso/serviceaccount/deleteconfirm.html'):
""" Delete a service from a user's account """
if serviceid > 0:
try:
acc = ServiceAccount.objects.get(id=serviceid)
except ServiceAccount.DoesNotExist:
return redirect('sso-profile')
if not acc.user == request.user:
return redirect('sso-profile')
if request.method == 'POST':
if 'confirm-delete' in request.POST:
try:
acc.delete()
except ServiceError:
messages.add_message(request, messages.ERROR, "Error deleting the service account, try again later.")
else:
messages.add_message(request, messages.INFO, "Service account successfully deleted.")
else:
return render_to_response(confirm_template, locals(), context_instance=RequestContext(request))
return redirect('sso-profile')
@login_required
def service_reset(request, serviceid, template='sso/serviceaccount/reset.html', complete_template='sso/serviceaccount/resetcomplete.html'):
""" Reset a user's password on a service """
acc = get_object_or_404(ServiceAccount, id=serviceid)
# If the account is inactive, or the service doesn't require a password, redirect
if not acc.active or ('require_password' in acc.service.settings and not acc.service.settings['require_password']):
return redirect('sso-profile')
# Check if the ServiceAccount belongs to the requesting user
if not acc.user == request.user:
return redirect('sso-profile')
if request.method == 'POST':
form = ServiceAccountResetForm(request.POST)
if form.is_valid():
if settings.GENERATE_SERVICE_PASSWORD:
passwd = hashlib.sha1('%s%s%s' % (acc.service_uid, settings.SECRET_KEY, random.randint(0, 2147483647))).hexdigest()
else:
passwd = form.cleaned_data['password']
if not acc.service.api_class.reset_password(acc.service_uid, passwd):
error = True
return render_to_response(complete_template, locals(), context_instance=RequestContext(request))
else:
form = ServiceAccountResetForm()
return render_to_response(template, locals(), context_instance=RequestContext(request))
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_url_kwarg = 'username'
slug_field = 'username'
template_name = 'sso/lookup/user.html'
def get(self, request, *args, **kwargs):
if not request.user.has_perm('sso.can_view_users') and not request.user.has_perm('sso.can_view_users_restricted'):
return HttpResponseForbidden()
return super(UserDetailView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(UserDetailView, self).get_context_data(**kwargs)
ctx.update({
'profile': self.object.get_profile(),
'services': ServiceAccount.objects.select_related('service').filter(user=self.object).only('service__name', 'service_uid', 'active'),
'characters': EVEPlayerCharacter.objects.select_related('corporation', 'corporation__alliance').filter(eveaccount__user=self.object).only('id', 'name', 'corporation__name'),
})
# If the HR app is installed, check the blacklist
if installed('hr'):
if self.request.user.has_perm('hr.add_blacklist'):
from hr.utils import blacklist_values
output = blacklist_values(self.object)
ctx.update({
'blacklisted': bool(len(output)),
'blacklist_items': output,
})
return ctx
@login_required
def user_lookup(request):
""" Lookup a user's account by providing a matching criteria """
form = UserLookupForm(request=request)
if not request.user.has_perm('sso.can_search_users'):
return redirect('sso-profile')
if request.method == 'POST':
form = UserLookupForm(request.POST, request=request)
if form.is_valid():
users = None
uids = []
username = form.cleaned_data['username'].strip()
if form.cleaned_data['type'] == '1':
users = User.objects.filter(username__icontains=username).only('username')
elif form.cleaned_data['type'] == '2':
uid = EVEAccount.objects.filter(characters__name__icontains=username).values('user')
for u in uid:
uids.append(u['user'])
users = User.objects.filter(id__in=uids).only('username')
elif installed('reddit') and gargoyle.is_active('reddit', request) and form.cleaned_data['type'] == '3':
from reddit.models import RedditAccount
uid = RedditAccount.objects.filter(username__icontains=username).values('user')
for u in uid:
uids.append(u['user'])
users = User.objects.filter(id__in=uids).only('username')
elif form.cleaned_data['type'] == '4':
users = User.objects.filter(email__icontains=username).only('username')
elif form.cleaned_data['type'] == '5':
uids = EVEAccount.objects.filter(api_user_id__icontains=username).values_list('user', flat=True)
users = User.objects.filter(id__in=uids).only('username')
elif form.cleaned_data['type'] == '6':
uids = ServiceAccount.objects.filter(service_uid__icontains=username).values_list('user', flat=True)
users = User.objects.filter(id__in=uids).only('username')
else:
messages.add_message(request, messages.ERROR, "Error parsing form, Type: %s, Value: %s" % (form.cleaned_data['type'], username))
return redirect('sso.views.user_lookup')
if users and len(users) == 1:
return redirect('sso-viewuser', username=users[0].username)
elif users and len(users) > 1:
return render_to_response('sso/lookup/lookuplist.html', locals(), context_instance=RequestContext(request))
else:
messages.add_message(request, messages.INFO, "No results found")
return redirect('sso.views.user_lookup')
return render_to_response('sso/lookup/userlookup.html', locals(), context_instance=RequestContext(request))
class APIPasswordUpdateView(LoginRequiredMixin, FormView):
form_class = APIPasswordForm
template_name = 'sso/apipassword.html'
success_url = reverse_lazy('sso-profile')
def form_valid(self, form):
profile = self.request.user.get_profile()
profile.api_service_password = hashlib.sha1(form.cleaned_data['password']).hexdigest()
profile.save()
messages.success(self.request, "Your API services password has been updated.")
return super(APIPasswordUpdateView, self).form_valid(form)
@login_required
def refresh_access(request, userid=0, corpid=0, allianceid=0):
""" Refreshes the user's access """
if userid > 0 and request.user.has_perm('sso.can_refresh_users'):
u = get_object_or_404(User, id=userid)
update_user_access(u.id)
messages.add_message(request, messages.INFO, "%s's access has been updated." % u.username)
return redirect('sso-viewuser', username=u.username)
if corpid > 0 and request.user.has_perm('sso.can_refresh_users'):
users = User.objects.filter(eveaccount__characters__corporation__id=corpid).distinct()
for u in users:
update_user_access.delay(u.id)
messages.add_message(request, messages.INFO, "%s accounts queued for update." % users.count())
return redirect('eveapi-corporation', pk=corpid)
if allianceid > 0 and request.user.has_perm('sso.can_refresh_users'):
users = User.objects.filter(eveaccount__characters__corporation__alliance__id=allianceid).distinct()
for u in users:
update_user_access.delay(u.id)
messages.add_message(request, messages.INFO, "%s accounts queued for update." % users.count())
return redirect('eveapi-alliance', pk=allianceid)
else:
update_user_access(request.user.id)
messages.add_message(request, messages.INFO, "User access updated.")
return redirect('sso-profile')
class EmailUpdateView(LoginRequiredMixin, FormView):
"""Updates a user's email address"""
form_class = EmailChangeForm
template_name = 'sso/emailchange.html'
success_url = reverse_lazy('sso-profile')
def form_valid(self, form):
self.request.user.email = form.cleaned_data['email2']
self.request.user.save()
messages.success(self.request, "E-mail address changed to %s." % form.cleaned_data['email2'])
return super(EmailUpdateView, self).form_valid(form)
class PrimaryCharacterUpdateView(LoginRequiredMixin, FormView):
"""Updates a user's primary character selection"""
form_class = PrimaryCharacterForm
template_name = 'sso/primarycharchange.html'
success_url = reverse_lazy('sso-profile')
def get_form_kwargs(self):
kwargs = super(PrimaryCharacterUpdateView, self).get_form_kwargs()
kwargs.update({
'user': self.request.user
})
return kwargs
def get_initial(self):
initial = super(PrimaryCharacterUpdateView, self).get_initial()
initial.update({
'character': self.request.user.get_profile().primary_character
})
return initial
def form_valid(self, form):
profile = self.request.user.get_profile()
profile.primary_character = form.cleaned_data['character']
profile.save()
messages.success(self.request, "Your primary character has changed to %s." % form.cleaned_data['character'])
return super(PrimaryCharacterUpdateView, self).form_valid(form)
class RedditTaggingUpdateView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
if not gargoyle.is_active('reddit', request):
return HttpResponseNotFound()
profile = request.user.get_profile()
if profile.primary_character is None:
messages.add_message(request, messages.ERROR, "Reddit account tagging requires a primary character before using. Please set one.")
if EVEPlayerCharacter.objects.filter(eveaccount__user=self.request.user).count():
return HttpResponseRedirect(reverse('sso-primarycharacterupdate'))
else:
return HttpResponseRedirect(reverse('sso-profile'))
profile.tag_reddit_accounts = not profile.tag_reddit_accounts
profile.save()
if profile.tag_reddit_accounts:
tag = 'Enabled'
else:
tag = 'Disabled'
messages.info(request, "Reddit account tagging is now %s" % tag)
if profile.tag_reddit_accounts:
name = profile.primary_character.name
else:
name = ''
        from reddit.tasks import update_user_flair
        for acc in self.request.user.redditaccount_set.all():
            update_user_flair.delay(acc.username, name)
return HttpResponseRedirect(reverse('sso-profile'))
class AddUserNote(LoginRequiredMixin, FormView):
template_name = 'sso/add_usernote.html'
form_class = UserNoteForm
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('sso.add_ssousernote'):
return HttpResponseForbidden()
return super(AddUserNote, self).dispatch(request, *args, **kwargs)
def get_user(self):
if not hasattr(self, 'user'):
            username = self.kwargs.get('username', None)
            self.user = User.objects.get(username=username)
return self.user
def get_context_data(self, **kwargs):
ctx = super(AddUserNote, self).get_context_data(**kwargs)
ctx.update({
'user': self.get_user()
})
return ctx
def get_initial(self):
initial = super(AddUserNote, self).get_initial()
initial.update({
'user': self.get_user()
})
return initial
def get_success_url(self):
return reverse('sso-viewuser', args=[self.get_user()])
def form_valid(self, form):
obj = form.save(commit=False)
obj.created_by = self.request.user
obj.save()
return super(AddUserNote, self).form_valid(form)
class UserIPAddressView(LoginRequiredMixin, ListView):
model = SSOUserIPAddress
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('sso.can_view_users_restricted'):
return HttpResponseForbidden()
return super(UserIPAddressView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
        if 'user' in self.request.GET:
qs = self.model.objects.filter(user__username__exact=self.request.GET.get('user'))
else:
qs = self.model.objects.filter(ip_address__contains=self.request.GET.get('ip', ''))
return qs.order_by('-last_seen')
def get_context_data(self, **kwargs):
ctx = super(UserIPAddressView, self).get_context_data(**kwargs)
ctx.update({
'ip': self.request.GET.get('ip', None),
'kuser': self.request.GET.get('user', None),
})
return ctx
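# Minimal urls.py sketch (illustration only) showing how these class-based
# views could be wired up. The names used in reverse()/reverse_lazy() above
# ('sso-profile', 'sso-viewuser', 'sso-primarycharacterupdate') come from this
# module; the URL regexes, the remaining names and the import path are
# assumptions.
#
# from django.conf.urls import patterns, url
# from sso.views import (APIPasswordUpdateView, EmailUpdateView,
#                        PrimaryCharacterUpdateView, RedditTaggingUpdateView,
#                        AddUserNote, UserIPAddressView)
#
# urlpatterns = patterns('',
#     url(r'^profile/email/$', EmailUpdateView.as_view(), name='sso-emailchange'),
#     url(r'^profile/primary/$', PrimaryCharacterUpdateView.as_view(),
#         name='sso-primarycharacterupdate'),
#     url(r'^profile/reddit/$', RedditTaggingUpdateView.as_view(),
#         name='sso-reddittagging'),
#     url(r'^user/(?P<username>[\w.@+-]+)/note/$', AddUserNote.as_view(),
#         name='sso-addusernote'),
#     url(r'^ipaddress/$', UserIPAddressView.as_view(), name='sso-useripaddress'),
# )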
|
|
# Copyright (c) 2015 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from concurrent import futures
import contextlib
import functools
import os
import random
import re
import select
import shlex
import signal
import subprocess
import fixtures
import netaddr
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.db import db_base_plugin_common
from neutron.tests import base as tests_base
from neutron.tests.common import base as common_base
from neutron.tests import tools
UNDEFINED = object()
NS_PREFIX = 'test-'
BR_PREFIX = 'test-br'
PORT_PREFIX = 'test-port'
VETH0_PREFIX = 'test-veth0'
VETH1_PREFIX = 'test-veth1'
PATCH_PREFIX = 'patch'
SS_SOURCE_PORT_PATTERN = re.compile(
r'^.*\s+\d+\s+.*:(?P<port>\d+)\s+[0-9:].*')
READ_TIMEOUT = os.environ.get('OS_TEST_READ_TIMEOUT', 5)
CHILD_PROCESS_TIMEOUT = os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20)
CHILD_PROCESS_SLEEP = os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5)
TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP)
def increment_ip_cidr(ip_cidr, offset=1):
"""Increment ip_cidr offset times.
example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24"
"""
net0 = netaddr.IPNetwork(ip_cidr)
net = netaddr.IPNetwork(ip_cidr)
net.value += offset
if not net0.network < net.ip < net0[-1]:
tools.fail(
'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is '
'outside ip_cidr' % (ip_cidr, offset))
return str(net)
def set_namespace_gateway(port_dev, gateway_ip):
"""Set gateway for the namespace associated to the port."""
if not port_dev.namespace:
tools.fail('tests should not change test machine gateway')
port_dev.route.add_gateway(gateway_ip)
def assert_ping(src_namespace, dst_ip, timeout=1, count=1):
ipversion = netaddr.IPAddress(dst_ip).version
ping_command = 'ping' if ipversion == 4 else 'ping6'
ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
ns_ip_wrapper.netns.execute([ping_command, '-c', count, '-W', timeout,
dst_ip])
@contextlib.contextmanager
def async_ping(namespace, ips):
with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor:
fs = [executor.submit(assert_ping, namespace, ip, count=10)
for ip in ips]
yield lambda: all(f.done() for f in fs)
futures.wait(fs)
for f in fs:
f.result()
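# Hedged usage sketch (not called by any test here): shows how assert_ping and
# async_ping are typically combined. The namespace and IP arguments are
# placeholders supplied by the caller.
def _example_ping_usage(client_namespace, server_ips):
    # Synchronous check: raises RuntimeError if the first address is unreachable
    assert_ping(client_namespace, server_ips[0])
    # Background pings run in worker threads; pings_done() reports whether they
    # have all finished. Leaving the context waits for them and re-raises any
    # failure.
    with async_ping(client_namespace, server_ips) as pings_done:
        still_running = not pings_done()
    return still_running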
def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1):
try:
assert_ping(src_namespace, dst_ip, timeout, count)
except RuntimeError:
pass
else:
tools.fail("destination ip %(destination)s is replying to ping from "
"namespace %(ns)s, but it shouldn't" %
{'ns': src_namespace, 'destination': dst_ip})
def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
"""Send arp request using arping executable.
NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery
Protocol instead.
"""
ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
arping_cmd = ['arping', '-c', count, '-w', timeout]
if source:
arping_cmd.extend(['-s', source])
arping_cmd.append(dst_ip)
ns_ip_wrapper.netns.execute(arping_cmd)
def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
try:
assert_arping(src_namespace, dst_ip, source, timeout, count)
except RuntimeError:
pass
else:
tools.fail("destination ip %(destination)s is replying to arp from "
"namespace %(ns)s, but it shouldn't" %
{'ns': src_namespace, 'destination': dst_ip})
def _get_source_ports_from_ss_output(output):
ports = set()
for line in output.splitlines():
match = SS_SOURCE_PORT_PATTERN.match(line)
if match:
ports.add(match.group('port'))
return ports
def get_unused_port(used, start=1024, end=65535):
candidates = set(range(start, end + 1))
return random.choice(list(candidates - used))
def get_free_namespace_port(protocol, namespace=None):
"""Return an unused port from given namespace
    WARNING: This function returns a port that is free at the time this
             function executes. If the port is only bound later, it may no
             longer be free by then. It is up to the caller to handle the
             error if the port is already in use.
:param protocol: Return free port for given protocol. Supported protocols
are 'tcp' and 'udp'.
"""
if protocol == n_const.PROTO_NAME_TCP:
param = '-tna'
elif protocol == n_const.PROTO_NAME_UDP:
param = '-una'
else:
raise ValueError("Unsupported procotol %s" % protocol)
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
output = ip_wrapper.netns.execute(['ss', param])
used_ports = _get_source_ports_from_ss_output(output)
return get_unused_port(used_ports)
def create_patch_ports(source, destination):
"""Hook up two OVS bridges.
The result is two patch ports, each end connected to a bridge.
The two patch port names will start with 'patch-', followed by identical
four characters. For example patch-xyzw-fedora, and patch-xyzw-ubuntu,
where fedora and ubuntu are random strings.
:param source: Instance of OVSBridge
:param destination: Instance of OVSBridge
"""
common = tests_base.get_rand_name(max_length=4, prefix='')
prefix = '%s-%s-' % (PATCH_PREFIX, common)
source_name = tests_base.get_rand_device_name(prefix=prefix)
destination_name = tests_base.get_rand_device_name(prefix=prefix)
source.add_patch_port(source_name, destination_name)
destination.add_patch_port(destination_name, source_name)
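# Hedged usage sketch: patch two disposable test bridges together.
# OVSBridgeFixture is defined further down in this module; the test_case
# argument is assumed to be a fixtures-aware test case.
def _example_patch_two_bridges(test_case):
    left = test_case.useFixture(OVSBridgeFixture()).bridge
    right = test_case.useFixture(OVSBridgeFixture()).bridge
    create_patch_ports(left, right)
    return left, right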
class RootHelperProcess(subprocess.Popen):
def __init__(self, cmd, *args, **kwargs):
for arg in ('stdin', 'stdout', 'stderr'):
kwargs.setdefault(arg, subprocess.PIPE)
self.namespace = kwargs.pop('namespace', None)
self.cmd = cmd
if self.namespace is not None:
cmd = ['ip', 'netns', 'exec', self.namespace] + cmd
root_helper = config.get_root_helper(utils.cfg.CONF)
cmd = shlex.split(root_helper) + cmd
self.child_pid = None
super(RootHelperProcess, self).__init__(cmd, *args, **kwargs)
self._wait_for_child_process()
def kill(self, sig=signal.SIGKILL):
pid = self.child_pid or str(self.pid)
utils.execute(['kill', '-%d' % sig, pid], run_as_root=True)
def read_stdout(self, timeout=None):
return self._read_stream(self.stdout, timeout)
@staticmethod
def _read_stream(stream, timeout):
if timeout:
poller = select.poll()
poller.register(stream.fileno())
poll_predicate = functools.partial(poller.poll, 1)
utils.wait_until_true(poll_predicate, timeout, 0.1,
RuntimeError(
'No output in %.2f seconds' % timeout))
return stream.readline()
def writeline(self, data):
self.stdin.write(data + os.linesep)
self.stdin.flush()
def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT,
sleep=CHILD_PROCESS_SLEEP):
def child_is_running():
child_pid = utils.get_root_helper_child_pid(
self.pid, run_as_root=True)
if utils.pid_invoked_with_cmdline(child_pid, self.cmd):
return True
utils.wait_until_true(
child_is_running,
timeout,
exception=RuntimeError("Process %s hasn't been spawned "
"in %d seconds" % (self.cmd, timeout)))
self.child_pid = utils.get_root_helper_child_pid(
self.pid, run_as_root=True)
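# Hedged usage sketch: run a long-lived command as root inside a namespace and
# read one line of its output. The command and namespace are placeholders.
def _example_root_helper_process(namespace):
    proc = RootHelperProcess(['tcpdump', '-l', '-n'], namespace=namespace)
    try:
        # read_stdout() blocks for at most READ_TIMEOUT seconds
        return proc.read_stdout(READ_TIMEOUT)
    finally:
        proc.kill()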
class NetcatTester(object):
TCP = n_const.PROTO_NAME_TCP
UDP = n_const.PROTO_NAME_UDP
def __init__(self, client_namespace, server_namespace, address,
dst_port, protocol, server_address='0.0.0.0', src_port=None):
"""
Tool for testing connectivity on transport layer using netcat
executable.
The processes are spawned lazily.
:param client_namespace: Namespace in which netcat process that
connects to other netcat will be spawned
:param server_namespace: Namespace in which listening netcat process
will be spawned
:param address: Server address from client point of view
:param dst_port: Port on which netcat listens
:param protocol: Transport protocol, either 'tcp' or 'udp'
:param server_address: Address in server namespace on which netcat
should listen
:param src_port: Source port of netcat process spawned in client
namespace - packet will have src_port in TCP/UDP
header with this value
"""
self.client_namespace = client_namespace
self.server_namespace = server_namespace
self._client_process = None
self._server_process = None
self.address = address
self.server_address = server_address
self.dst_port = str(dst_port)
self.src_port = str(src_port) if src_port else None
if protocol not in TRANSPORT_PROTOCOLS:
raise ValueError("Unsupported protocol %s" % protocol)
self.protocol = protocol
@property
def client_process(self):
if not self._client_process:
self.establish_connection()
return self._client_process
@property
def server_process(self):
if not self._server_process:
self._spawn_server_process()
return self._server_process
def _spawn_server_process(self):
self._server_process = self._spawn_nc_in_namespace(
self.server_namespace,
address=self.server_address,
listen=True)
@property
def is_established(self):
return bool(self._client_process and not self._client_process.poll())
def establish_connection(self):
if self._client_process:
raise RuntimeError('%(proto)s connection to %(ip_addr)s is already'
' established' %
{'proto': self.protocol,
'ip_addr': self.address})
if not self._server_process:
self._spawn_server_process()
self._client_process = self._spawn_nc_in_namespace(
self.client_namespace,
address=self.address)
if self.protocol == self.UDP:
# Create an ASSURED entry in conntrack table for UDP packets,
# that requires 3-way communication
# 1st transmission creates UNREPLIED
# 2nd transmission removes UNREPLIED
# 3rd transmission creates ASSURED
data = 'foo'
self.client_process.writeline(data)
self.server_process.read_stdout(READ_TIMEOUT)
self.server_process.writeline(data)
self.client_process.read_stdout(READ_TIMEOUT)
self.client_process.writeline(data)
self.server_process.read_stdout(READ_TIMEOUT)
def test_connectivity(self, respawn=False):
testing_string = uuidutils.generate_uuid()
if respawn:
self.stop_processes()
self.client_process.writeline(testing_string)
message = self.server_process.read_stdout(READ_TIMEOUT).strip()
self.server_process.writeline(message)
message = self.client_process.read_stdout(READ_TIMEOUT).strip()
return message == testing_string
def _spawn_nc_in_namespace(self, namespace, address, listen=False):
cmd = ['nc', address, self.dst_port]
if self.protocol == self.UDP:
cmd.append('-u')
if listen:
cmd.append('-l')
if self.protocol == self.TCP:
cmd.append('-k')
else:
cmd.extend(['-w', '20'])
if self.src_port:
cmd.extend(['-p', self.src_port])
proc = RootHelperProcess(cmd, namespace=namespace)
return proc
def stop_processes(self):
for proc_attr in ('_client_process', '_server_process'):
proc = getattr(self, proc_attr)
if proc:
if proc.poll() is None:
proc.kill()
proc.wait()
setattr(self, proc_attr, None)
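# Hedged usage sketch: verify TCP connectivity between two namespaces with
# NetcatTester. The namespaces, server address and port are placeholders.
def _example_netcat_connectivity(client_ns, server_ns, server_ip):
    nc = NetcatTester(client_ns, server_ns, server_ip, 3333,
                      protocol=NetcatTester.TCP)
    try:
        # Lazily spawns the server and client netcat processes, sends a test
        # string both ways and returns True when it comes back intact.
        return nc.test_connectivity()
    finally:
        nc.stop_processes()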
class NamespaceFixture(fixtures.Fixture):
"""Create a namespace.
:ivar ip_wrapper: created namespace
:type ip_wrapper: IPWrapper
:ivar name: created namespace name
:type name: str
"""
def __init__(self, prefix=NS_PREFIX):
super(NamespaceFixture, self).__init__()
self.prefix = prefix
def _setUp(self):
ip = ip_lib.IPWrapper()
self.name = self.prefix + uuidutils.generate_uuid()
self.addCleanup(self.destroy)
self.ip_wrapper = ip.ensure_namespace(self.name)
def destroy(self):
if self.ip_wrapper.netns.exists(self.name):
self.ip_wrapper.netns.delete(self.name)
class VethFixture(fixtures.Fixture):
"""Create a veth.
:ivar ports: created veth ports
:type ports: IPDevice 2-uplet
"""
def _setUp(self):
ip_wrapper = ip_lib.IPWrapper()
self.ports = common_base.create_resource(
VETH0_PREFIX,
lambda name: ip_wrapper.add_veth(name, self.get_peer_name(name)))
self.addCleanup(self.destroy)
def destroy(self):
for port in self.ports:
ip_wrapper = ip_lib.IPWrapper(port.namespace)
if (ip_wrapper.netns.exists(port.namespace) or
port.namespace is None):
try:
ip_wrapper.del_veth(port.name)
break
except RuntimeError:
# NOTE(cbrandily): It seems a veth is automagically deleted
# when a namespace owning a veth endpoint is deleted.
pass
@staticmethod
def get_peer_name(name):
if name.startswith(VETH0_PREFIX):
return name.replace(VETH0_PREFIX, VETH1_PREFIX)
elif name.startswith(VETH1_PREFIX):
return name.replace(VETH1_PREFIX, VETH0_PREFIX)
else:
tools.fail('%s is not a valid VethFixture veth endpoint' % name)
@six.add_metaclass(abc.ABCMeta)
class PortFixture(fixtures.Fixture):
"""Create a port.
:ivar port: created port
:type port: IPDevice
:ivar bridge: port bridge
"""
def __init__(self, bridge=None, namespace=None, mac=None, port_id=None):
super(PortFixture, self).__init__()
self.bridge = bridge
self.namespace = namespace
self.mac = (
mac or db_base_plugin_common.DbBasePluginCommon._generate_mac())
self.port_id = port_id or uuidutils.generate_uuid()
@abc.abstractmethod
def _create_bridge_fixture(self):
pass
@abc.abstractmethod
def _setUp(self):
super(PortFixture, self)._setUp()
if not self.bridge:
self.bridge = self.useFixture(self._create_bridge_fixture()).bridge
@classmethod
def get(cls, bridge, namespace=None, mac=None, port_id=None):
"""Deduce PortFixture class from bridge type and instantiate it."""
if isinstance(bridge, ovs_lib.OVSBridge):
return OVSPortFixture(bridge, namespace, mac, port_id)
if isinstance(bridge, bridge_lib.BridgeDevice):
return LinuxBridgePortFixture(bridge, namespace)
if isinstance(bridge, VethBridge):
return VethPortFixture(bridge, namespace)
tools.fail('Unexpected bridge type: %s' % type(bridge))
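# Hedged usage sketch: let PortFixture.get() pick the concrete fixture class
# for an existing bridge and plug a port into it. The test_case, bridge and
# namespace arguments are placeholders supplied by the caller.
def _example_port_on_bridge(test_case, bridge, namespace):
    port_fixture = test_case.useFixture(
        PortFixture.get(bridge, namespace=namespace))
    return port_fixture.port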
class OVSBridgeFixture(fixtures.Fixture):
"""Create an OVS bridge.
:ivar prefix: bridge name prefix
:type prefix: str
:ivar bridge: created bridge
:type bridge: OVSBridge
"""
def __init__(self, prefix=BR_PREFIX):
super(OVSBridgeFixture, self).__init__()
self.prefix = prefix
def _setUp(self):
ovs = ovs_lib.BaseOVS()
self.bridge = common_base.create_resource(self.prefix, ovs.add_bridge)
self.addCleanup(self.bridge.destroy)
class OVSPortFixture(PortFixture):
def _create_bridge_fixture(self):
return OVSBridgeFixture()
def _setUp(self):
super(OVSPortFixture, self)._setUp()
interface_config = cfg.ConfigOpts()
interface_config.register_opts(interface.OPTS)
ovs_interface = interface.OVSInterfaceDriver(interface_config)
port_name = tests_base.get_rand_device_name(PORT_PREFIX)
ovs_interface.plug_new(
None,
self.port_id,
port_name,
self.mac,
bridge=self.bridge.br_name,
namespace=self.namespace)
self.addCleanup(self.bridge.delete_port, port_name)
self.port = ip_lib.IPDevice(port_name, self.namespace)
class LinuxBridgeFixture(fixtures.Fixture):
"""Create a linux bridge.
:ivar bridge: created bridge
:type bridge: BridgeDevice
:ivar namespace: created bridge namespace
:type namespace: str
"""
def __init__(self, prefix=BR_PREFIX, namespace=UNDEFINED):
super(LinuxBridgeFixture, self).__init__()
self.prefix = prefix
self.namespace = namespace
def _setUp(self):
if self.namespace is UNDEFINED:
self.namespace = self.useFixture(NamespaceFixture()).name
self.bridge = common_base.create_resource(
self.prefix,
bridge_lib.BridgeDevice.addbr,
namespace=self.namespace)
self.addCleanup(self.bridge.delbr)
self.bridge.link.set_up()
self.addCleanup(self.bridge.link.set_down)
class LinuxBridgePortFixture(PortFixture):
"""Create a linux bridge port.
:ivar port: created port
:type port: IPDevice
:ivar br_port: bridge side veth peer port
:type br_port: IPDevice
"""
def _create_bridge_fixture(self):
return LinuxBridgeFixture()
def _setUp(self):
super(LinuxBridgePortFixture, self)._setUp()
self.port, self.br_port = self.useFixture(VethFixture()).ports
# bridge side
br_ip_wrapper = ip_lib.IPWrapper(self.bridge.namespace)
br_ip_wrapper.add_device_to_namespace(self.br_port)
self.bridge.addif(self.br_port)
self.br_port.link.set_up()
# port side
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
class VethBridge(object):
def __init__(self, ports):
self.ports = ports
self.unallocated_ports = set(self.ports)
def allocate_port(self):
try:
return self.unallocated_ports.pop()
except KeyError:
            tools.fail('All VethBridge ports (%s) are already allocated.' %
len(self.ports))
class VethBridgeFixture(fixtures.Fixture):
"""Simulate a bridge with a veth.
:ivar bridge: created bridge
    :type bridge: VethBridge
"""
def _setUp(self):
ports = self.useFixture(VethFixture()).ports
self.bridge = VethBridge(ports)
class VethPortFixture(PortFixture):
"""Create a veth bridge port.
:ivar port: created port
:type port: IPDevice
"""
def _create_bridge_fixture(self):
return VethBridgeFixture()
def _setUp(self):
super(VethPortFixture, self)._setUp()
self.port = self.bridge.allocate_port()
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
|
|
# -*- coding: utf-8 -*-
# This script is designed to be run as a Web2Py application:
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py
# or
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py -A testscript
import argparse
import os
import unittest
from gluon import current
from gluon.storage import Storage
current.data = Storage()
# @ToDo: Load these only when running Selenium tests
# (shouldn't be required for Smoke tests)
# (means removing the *)
from selenium import webdriver
from tests.asset import *
from tests.inv import *
from tests.member import *
from tests.org import *
from tests.project import *
from tests.staff import *
from tests.volunteer import *
from tests.helpers import *
from tests.event import *
from tests.irs import *
from tests.person import *
def loadAllTests():
folder = request.folder
# Run the file modules/templates/<current_template>/tests.py to get tests list.
found = False
templates = settings.get_template()
if not isinstance(templates, (tuple, list)):
templates = (templates,)
for template in templates[::-1]:
path = os.path.join(folder,
"modules", "templates",
template,
"tests.py")
if os.path.exists(path):
settings.exec_template(path)
found = True
break
if not found:
# Fallback to the default template tests.
path = os.path.join(folder,
"modules", "templates",
"default",
"tests.py")
settings.exec_template(path)
tests_list = current.selenium_tests
loadTests = unittest.TestLoader().loadTestsFromTestCase
# Initialise the suite with the first test.
exec("suite = loadTests(%s)" % tests_list[0])
# Shortcut
addTests = suite.addTests
# Add all tests to the suite.
for i in range(1, len(tests_list)):
exec("addTests(loadTests(%s))" % tests_list[i])
return suite
# Set up the command line arguments
desc = "Script to run the Sahana Eden test suite."
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("-C", "--class",
help = "Name of class to run")
method_desc = """Name of method to run, this is used in conjunction with the
class argument or with the name of the class followed by the name of the method
separated with a period, class.method.
"""
parser.add_argument("-M",
"--method",
"--test",
help = method_desc)
parser.add_argument("-A",
"--auth",
help = "web2py default argument feed")
parser.add_argument("-V", "--verbose",
type = int,
default = 2,
help = "The level of verbose reporting")
parser.add_argument("--nohtml",
action='store_const',
const=True,
help = "Disable HTML reporting.")
parser.add_argument("--html-path",
help = "Path where the HTML report will be saved.",
default = "")
parser.add_argument("--html-name-date",
action='store_const',
const=True,
help = "Include just the date in the name of the HTML report.")
suite_desc = """This will execute a standard testing schedule. The valid values
are, smoke, quick, complete and full. If a method or class options is selected
the the suite will be ignored.
The suite options can be described as follows:
smoke: This will run the broken link test
quick: This will run all the tests marked as essential
complete: This will run all tests except those marked as long
full: This will run all tests
"""
parser.add_argument("--suite",
help = suite_desc,
choices = ["smoke", "roles", "quick", "complete", "full"],
default = "quick")
parser.add_argument("--link-depth",
type = int,
default = 16,
help = "The recursive depth when looking for links")
desc = """This will record the timings in a spreadsheet file. The data
will be accumulated over time, holding a maximum of 100 results; the file will
be rotated automatically. The file holds details for another program to analyse.
The file will be written to the same location as the HTML report.
"""
parser.add_argument("-r",
"--record-timings",
action='store_const',
const=True,
help = desc)
up_desc = """The user name and password, separated by a /. Multiple user name
and passwords can be added by separating them with a comma. If multiple user
name and passwords are provided then the same test will be run sequentially
using the given user in each case.
"""
parser.add_argument("--user-password",
default = "admin@example.com/testing",
help = up_desc)
parser.add_argument("--keep-browser-open",
help = "Keep the browser open once the tests have finished running",
action='store_const',
const = True)
parser.add_argument("--browser",
help = "Set the browser to use (Firefox/Chrome)",
action = "store",
default = "Firefox")
desc = """Run the smoke tests even if debug is set to true.
With debug on it can add up to a second per link, and since a full run of the
smoke tests covers thousands of links, the difference of having this setting
on can be measured in hours.
"""
parser.add_argument("--force-debug",
action='store_const',
const=True,
help = desc)
desc = """Set a threshold in seconds.
If fetching a link during the smoke tests takes longer than this, it will be reported.
"""
parser.add_argument("--threshold",
type = int,
default = 10,
help = desc)
desc = """Smoke test report only.
Don't actually run the smoke tests but rebuild the smoke test report.
"""
parser.add_argument("--smoke-report",
action='store_const',
const=True,
help = desc)
argsObj = parser.parse_args()
args = argsObj.__dict__
active_driver = {'firefox': webdriver.Firefox,
'chrome': webdriver.Chrome}[args['browser'].lower()]
# Read Settings
settings = current.deployment_settings
public_url = settings.get_base_public_url()
base_url = "%s/%s" % (public_url, current.request.application)
system_name = settings.get_system_name()
# Store these to be available to modules
config = current.test_config = Storage()
config.system_name = system_name
config.timeout = 5 # seconds
config.url = base_url
base_dir = os.path.join(os.getcwd(), "applications", current.request.application)
test_dir = os.path.join(base_dir, "modules", "tests")
config.base_dir = base_dir
if not args["suite"] == "smoke" and settings.get_ui_navigate_away_confirm():
print "The tests will fail unless you have settings.ui.navigate_away_confirm = False in models/000_config.py"
exit()
if args["suite"] == "smoke" or args["suite"] == "complete":
if settings.get_base_debug() and not args["force_debug"]:
print "settings.base.debug is set to True in 000_config.py, either set it to False or use the --force-debug switch"
exit()
config.record_timings = args["record_timings"]
if config.record_timings:
path = args["html_path"]
config.record_timings_filename = os.path.join(path, "Sahana-Eden-record-timings.xls")
config.record_summary_filename = os.path.join(path, "Sahana-Eden-record-summary.xls")
config.verbose = args["verbose"]
browser_open = False
# @todo test with invalid class and methods passed as CLA
if args["method"]:
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
if args["class"]:
name = "%s.%s" % (args["class"], args["method"])
else:
name = args["method"]
suite = unittest.TestLoader().loadTestsFromName(args["method"],
globals()[args["class"]]
)
elif args["class"]:
# Run a single Selenium test
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = unittest.TestLoader().loadTestsFromTestCase(globals()[args["class"]])
elif args["suite"] == "smoke":
# Run Smoke tests
try:
from tests.smoke import *
broken_links = BrokenLinkTest()
        broken_links.setReportOnly(args["smoke_report"])
broken_links.setDepth(args["link_depth"])
broken_links.setThreshold(args["threshold"])
broken_links.setUser(args["user_password"])
suite = unittest.TestSuite()
suite.addTest(broken_links)
except NameError as msg:
from s3 import s3_debug
s3_debug("%s, unable to run the smoke tests." % msg)
pass
elif args["suite"] == "roles":
# Run Roles tests
from tests.roles.test_roles import *
suite = test_roles()
elif args["suite"] == "complete":
# Run all Selenium Tests & Smoke Tests
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = loadAllTests()
try:
from tests.smoke import *
broken_links = BrokenLinkTest()
        broken_links.setReportOnly(args["smoke_report"])
broken_links.setDepth(args["link_depth"])
broken_links.setThreshold(args["threshold"])
broken_links.setUser(args["user_password"])
suite.addTest(broken_links)
except NameError as msg:
from s3 import s3_debug
s3_debug("%s, unable to run the smoke tests." % msg)
pass
else:
# Run all Selenium Tests
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = loadAllTests()
config.html = False
if args["nohtml"]:
unittest.TextTestRunner(verbosity=config.verbose).run(suite)
else:
try:
path = args["html_path"]
if args["html_name_date"]:
filename = "Sahana-Eden-%s.html" % current.request.now.date()
else:
filename = "Sahana-Eden-%s.html" % current.request.now
# Windows compatibility
filename = filename.replace(":", "-")
fullname = os.path.join(path, filename)
fp = open(fullname, "wb")
config.html = True
from tests.runner import EdenHTMLTestRunner
runner = EdenHTMLTestRunner(stream = fp,
title = "Sahana Eden",
verbosity = config.verbose,
)
runner.run(suite)
except ImportError:
config.html = False
unittest.TextTestRunner(verbosity=config.verbose).run(suite)
# Cleanup
if browser_open and not args["keep_browser_open"]:
browser.close()
# END =========================================================================
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-retail documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-retail"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-retail",
"github_user": "googleapis",
"github_repo": "python-retail",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-retail-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-retail.tex",
"google-cloud-retail Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(root_doc, "google-cloud-retail", "google-cloud-retail Documentation", [author], 1,)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-retail",
"google-cloud-retail Documentation",
author,
"google-cloud-retail",
"google-cloud-retail Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
|
"""
Extensible Authorizer
"""
import logging
import operator
from functools import reduce
from flowsieve import events
from flowsieve.acl.acl_result import ACLResult
from flowsieve.packet.eapol import ETH_TYPE_EAPOL
from flowsieve.secure_switch import SecureSwitch
from flowsieve.user_store import UserStore
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.mac import BROADCAST_STR
from ryu.lib.packet import ethernet, packet
from yaml import YAMLError, safe_load
class Authorizer(app_manager.RyuApp):
COOKIE_AUTHORIZER = 0xf200
COOKIE_DROP = COOKIE_AUTHORIZER | 0x01
def __init__(self, *args, **kwargs):
super(Authorizer, self).__init__(*args, **kwargs)
self._mac_to_users = {}
self._authenticated_ports = set()
self._user_store = UserStore.get_instance()
self._topology = Topology()
def _get_user_by_mac(self, mac):
"""Get user object by source MAC address"""
if mac not in self._mac_to_users:
return None
user_name = self._mac_to_users[mac]
return self._user_store.get_user(user_name)
def _install_drop_flow_to_port(self, dp, port_no):
"""Install flow rules to drop all packets to port_no
"""
ofproto = dp.ofproto
ofproto_parser = dp.ofproto_parser
match = ofproto_parser.OFPMatch(in_port=port_no)
mod = dp.ofproto_parser.OFPFlowMod(
datapath=dp, match=match, cookie=Authorizer.COOKIE_DROP,
command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0x0000,
flags=ofproto.OFPFF_SEND_FLOW_REM, actions=[])
dp.send_msg(mod)
def _delete_switch_flows(self, dp, port_no):
"""Delete L2 switch flows on EAPOL Logoff
"""
ofproto = dp.ofproto
ofproto_parser = dp.ofproto_parser
match_in = ofproto_parser.OFPMatch(in_port=port_no)
mod_inport = dp.ofproto_parser.OFPFlowMod(
datapath=dp, match=match_in, cookie=SecureSwitch.COOKIE_FORWARD,
command=ofproto.OFPFC_DELETE)
dp.send_msg(mod_inport)
match_out = ofproto_parser.OFPMatch()
mod_outport = dp.ofproto_parser.OFPFlowMod(
datapath=dp, match=match_out, cookie=SecureSwitch.COOKIE_FORWARD,
command=ofproto.OFPFC_DELETE, out_port=port_no)
dp.send_msg(mod_outport)
def _get_dpset(self):
return app_manager.lookup_service_brick("dpset")
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
dpid = msg.datapath.id
port = msg.in_port
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
if eth.ethertype != ETH_TYPE_EAPOL:
if not self._is_allowed_port(dpid, port):
self._install_drop_flow_to_port(msg.datapath, port)
@set_ev_cls(events.EventPortAuthorized)
def _event_port_authorized_handler(self, ev):
self._mac_to_users[ev.mac] = ev.user_name
self._authenticated_ports.add((ev.dpid, ev.port))
dp = self._get_dpset().get(ev.dpid)
if dp is None:
return
ofproto_parser = dp.ofproto_parser
ofproto = dp.ofproto
match = ofproto_parser.OFPMatch(in_port=ev.port)
mod = dp.ofproto_parser.OFPFlowMod(
datapath=dp, match=match, cookie=Authorizer.COOKIE_DROP,
command=ofproto.OFPFC_DELETE)
dp.send_msg(mod)
@set_ev_cls(events.EventPortLoggedOff)
def _event_port_loggedoff_handler(self, ev):
if ev.mac in self._mac_to_users:
del self._mac_to_users[ev.mac]
self._authenticated_ports.discard((ev.dpid, ev.port))
dp = self._get_dpset().get(ev.dpid)
if dp is None:
return
self._delete_switch_flows(dp, ev.port)
def _is_allowed_port(self, dpid, port):
return (dpid, port) in self._authenticated_ports or \
(dpid, port) in self._topology.trusted_ports
@set_ev_cls(events.AuthorizeRequest)
def _authorize_request_handler(self, req):
pkt = packet.Packet(req.msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
src = eth.src
dst = eth.dst
dpid = req.msg.datapath.id
port = req.msg.in_port
src_user = self._get_user_by_mac(src)
dst_user = self._get_user_by_mac(dst)
result = ACLResult(False)
if not self._is_allowed_port(dpid, port):
pass
elif src_user is not None and dst_user is not None:
acl_results = [acl.allows_packet(pkt, src_user) for acl in
dst_user.acls.values()]
result = reduce(operator.add, acl_results)
if dst == BROADCAST_STR:
result = ACLResult(True)
reply = events.AuthorizeReply(req.dst, result)
self.reply_to_request(req, reply)
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _event_state_change(self, ev):
msg = ev.msg
ofproto = msg.datapath.ofproto
dpid = msg.datapath.id
port = msg.desc
reason = msg.reason
        if (reason == ofproto.OFPPR_DELETE or
                (reason == ofproto.OFPPR_MODIFY and
                 msg.desc.state & ofproto.OFPPS_LINK_DOWN)):
port_no = port.port_no
self._authenticated_ports.discard((dpid, port_no))
class Topology(object):
DEFAULT_TOPOLOGY_CONF_FILE = "conf/topology.yml"
def __init__(self, file_name=None):
super(Topology, self).__init__()
if file_name is None:
file_name = self.__class__.DEFAULT_TOPOLOGY_CONF_FILE
self.config_file = file_name
self._logger = logging.getLogger(self.__class__.__name__)
self.trusted_ports = set()
self._read_config_file()
def _read_config_file(self):
try:
with open(self.config_file) as f:
                data = safe_load(f)
except IOError:
self._logger.error("Could not open %s", self.config_file)
return
except YAMLError:
self._logger.error("Error while parsing %s", self.user_role_file)
return
if "switches" in data:
self._logger.info("Reading switch data")
self._read_switch_data(data["switches"])
def _read_switch_data(self, data):
for item in data:
if "dpid" not in item:
self.warning("DPID not defined")
continue
try:
dpid = int(item["dpid"])
except ValueError:
self.warning("%s is not a valid DPID", item["data"])
continue
if "trusted_ports" in item:
trusted_ports = item["trusted_ports"]
for port in trusted_ports:
try:
port_num = int(port)
except ValueError:
self.warning("%s is not a valid port number", port)
continue
self.trusted_ports.add((dpid, port_num))
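# Hedged sketch of the conf/topology.yml layout that _read_switch_data above
# expects; the concrete DPID and port numbers are examples only:
#
#     switches:
#       - dpid: 1
#         trusted_ports: [1, 2]
#       - dpid: 2
#         trusted_ports: [3]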
|